-
Notifications
You must be signed in to change notification settings - Fork 0
/
benchmarks.tcl
656 lines (539 loc) · 18 KB
/
benchmarks.tcl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
# -*- tcl -*- Copyright (c) 2012-2024 Andreas Kupries
# # ## ### ##### ######## ############# #####################
## Handle tclbench-based benchmarks
# # ## ### ##### ######## ############# #####################
## Repetition setting. Number of repeats (beyond the regular run)
## to perform. Default 0. Positive integer.
## Irrelevant to work database keying.

kettle option define --repeats {
    Number of repeats to perform per bench file.
    (Number of runs is 1 + repeats).
} 0 {range 0 Inf}
kettle option no-work-key --repeats

# # ## ### ##### ######## ############# #####################
## Iterations setting. Number of iterations to run for a benchmark, if
## not overridden by the benchmark itself. Default 1000. Positive,
## non-zero integer.
## Irrelevant to work database keying.

kettle option define --iters {
    Number of iterations to perform per benchmark.
} 1000 {range 1 Inf}
# BUGFIX: This previously excluded --repeats a second time (copy/paste
# slip), leaving --iters keyed into the work database, contrary to the
# note above.
kettle option no-work-key --iters

# # ## ### ##### ######## ############# #####################
## Collation setting. How to coalesce the data from several
## repeats into a single number.
## Irrelevant to work database keying.

kettle option define --collate {
    Method for coalescing the data from multiple runs (repeats > 0).
} min {enum {min max avg}}
kettle option no-work-key --collate

# # ## ### ##### ######## ############# #####################
## Filter settings to select which benchmarks to run.
## Irrelevant to work database keying.
#
# Note: If both --match and --rmatch are specified then _both_
# apply. I.e. a benchmark will be run if and only if it matches both
# patterns.

kettle option define --match {
    Run only benchmarks matching the glob pattern.
    Default is the empty string, disabling the filter.
} {} string
kettle option no-work-key --match

kettle option define --rmatch {
    Run only benchmarks matching the regexp pattern.
    Default is the empty string, disabling the filter.
} {} string
kettle option no-work-key --rmatch

# # ## ### ##### ######## ############# #####################

namespace eval ::kettle { namespace export benchmarks }
# # ## ### ##### ######## ############# #####################
## API.
proc ::kettle::benchmarks {{benchsrcdir bench}} {
    ## Entry point. Searches for tclbench benchmark files under
    ## benchsrcdir and registers a "bench" recipe which runs them.

    # Overwrite self, we run only once for effect.
    proc ::kettle::benchmarks args {}

    # Heuristic search for benchmarks
    # Aborts caller when nothing is found.
    lassign [path scan \
		 {tclbench benchmarks} \
		 $benchsrcdir \
		 {path bench-file}] \
	root benchmarks

    # Put the benchmarks into recipes.
    recipe define bench {
	Run the benchmarks
    } {benchsrcdir benchmarks} {
	# Note: We build and install the package under profiling (and
	# its dependencies) into a local directory (in the current
	# working directory). We try to install a debug variant first,
	# and if that fails a regular one.
	#
	# Note 2: If the user explicitly specified a location to build
	# to we use that, and do not clean it up after the test. This
	# makes it easy to investigate a core dump generated during
	# test.
	if {[option userdefined --prefix]} {
	    set tmp [option get --prefix]
	    set cleanup 0
	} else {
	    set tmp [path norm [path tmpfile bench_install_]]
	    path ensure-cleanup $tmp
	    set cleanup 1
	}
	try {
	    # Install debug build if possible, else a regular build.
	    if {![invoke self debug --prefix $tmp] &&
		![invoke self install --prefix $tmp]
	    } {
		status fail "Unable to generate local benchmark installation"
	    }
	    Bench::Run $benchsrcdir $benchmarks $tmp
	} finally {
	    # Drop the temporary installation, unless user-chosen.
	    if {$cleanup} {
		file delete -force $tmp
	    }
	}
    } $root $benchmarks
    return
}
# # ## ### ##### ######## ############# #####################
## Support code for the recipe.

namespace eval ::kettle::Bench {
    # Make the kettle utilities used by the bench runner available
    # without qualification.
    namespace import \
	::kettle::path    \
	::kettle::io      \
	::kettle::status  \
	::kettle::option  \
	::kettle::strutil \
	::kettle::stream
}
proc ::kettle::Bench::Run {srcdir benchfiles localprefix} {
    ## Run all bench files, collecting their output into state, then
    ## report timings and collated results.
    ##
    ## We are running each bench file in a separate sub process, to
    ## catch crashes, etc. ... We assume that the bench file is self
    ## contained in terms of loading all its dependencies, like
    ## tclbench itself, utility commands it may need, etc. This
    ## assumption allows us to run it directly, using our own
    ## tcl executable as interpreter.

    stream to log ============================================================

    set main [path norm [option get @kettledir]/benchmain.tcl]
    InitState

    # Generate map of padded bench file names to ensure vertical
    # alignment of output across them.
    # BUGFIX: Key the map by [file tail ...], because the lookup in
    # Benchmark is done by tail as well. Keying by the (possibly
    # directory-qualified) list entry broke that lookup.
    set short {}
    foreach b $benchfiles {
	lappend short [file tail $b]
    }
    foreach b $benchfiles pb [strutil padr $short] {
	dict set state fmap [file tail $b] $pb
    }

    set repeats [option get --repeats]

    # Filter and other settings for the child process.
    lappend bconfig MATCH  [option get --match]
    lappend bconfig RMATCH [option get --rmatch]
    lappend bconfig ITERS  [option get --iters]
    lappend bconfig prefix $localprefix

    path in $srcdir {
	foreach bench $benchfiles {
	    stream aopen
	    # One regular run, plus the requested repeats.
	    for {set round 0} {$round <= $repeats} {incr round} {
		dict set state round $round
		path pipe line {
		    io trace {BENCH: $line}
		    ProcessLine $line
		} [option get --with-shell] $main $bconfig [path norm $bench]
	    }
	}
    }

    stream to raw {$state}

    # Summary results...
    stream to summary {[FormatTimings $state]}

    set fr [FormatResults $state]\n
    stream term always $fr
    stream to summary {$fr}

    # Report ok/fail
    status [dict get $state status]
    return
}
proc ::kettle::Bench::FormatTimings {state} {
    ## Render the per-suite timing data found in state as a padded
    ## table, sorted by microseconds/benchmark, descending. Returns
    ## the table as one multi-line string.

    set times [dict get $state times]

    # Re-package into 5-tuples (shell suite nbench sec usec),
    # ordered by shell, then suite.
    set tuples {}
    foreach key [lsort -dict [dict keys $times]] {
	lassign $key shell suite
	lassign [dict get $times $key] nbench sec usec
	lappend tuples [list $shell $suite $nbench $sec $usec]
    }

    # Transpose into per-column lists, framed by header and footer
    # rows, with the rows ordered by time per benchmark.
    lappend colShell Shell      =====
    lappend colSuite Benchsuite ==========
    lappend colNum   Benchmarks ==========
    lappend colSec   Seconds    =======
    lappend colUsec  uSec/Bench ==========
    foreach row [lsort -index 4 -decreasing $tuples] {
	lassign $row shell suite nbench sec usec
	lappend colShell $shell
	lappend colSuite $suite
	lappend colNum   $nbench
	lappend colSec   $sec
	lappend colUsec  $usec
    }
    lappend colShell =====
    lappend colSuite ==========
    lappend colNum   ==========
    lappend colSec   =======
    lappend colUsec  ==========

    # Emit row by row, each column padded for vertical alignment.
    lappend lines \nTimings...
    foreach \
	shell  [strutil padr $colShell] \
	suite  [strutil padr $colSuite] \
	nbench [strutil padr $colNum]   \
	sec    [strutil padr $colSec]   \
	usec   [strutil padr $colUsec] {
	    lappend lines "$shell $suite $nbench $sec $usec"
	}
    return [join $lines \n]
}
proc ::kettle::Bench::FormatResults {state} {
    ## Render the collated per-benchmark results as a padded
    ## two-column table (description, time). Additionally emits the
    ## results to the "sak" stream in the csv format understood by
    ## the Tcllib sak bench/* commands.

    # Extract data ...
    set results [dict get $state results]

    # results = dict (key -> list(time))
    # key = list (shell ver benchfile description)
    #             0     1   2         3
    # [no round information, implied in the list of results]

    # Defaults for the sak header, in case there are no results at
    # all. After the loop below these hold the values from the last
    # key processed.
    set shell n/a
    set ver n/a

    # Sort by description, re-package into tuples. The times of all
    # rounds per key are collapsed into one value, per --collate.
    set tmp {}
    foreach k [lsort -dict -index 3 [dict keys $results]] {
	lassign $k shell ver benchfile description
	set times [dict get $results $k]
	set times [Collate_[option get --collate] $times]
	lappend tmp [list $description $times]
    }

    # generate a format suitable for Tcllib sak bench/* commands.
    stream to sak {# -*- tcl -*- bench/csv}
    stream to sak {1}
    stream to sak {1,$ver,$shell}
    foreach item $tmp {
	lassign $item d t
	stream to sak {[incr count],$d,$t}
    }

    # Transpose into columns. Add the header and footer lines.
    lappend ds Description ===========
    lappend ts Time ====
    foreach item $tmp {
	lassign $item d t
	lappend ds $d
	lappend ts $t
    }
    lappend ds =========== Description
    lappend ts ==== Time

    # Print the columns, each padded for vertical alignment.
    lappend lines \nResults...
    foreach \
	d [strutil padr $ds] \
	t [strutil padr $ts] {
	lappend lines "$d $t"
    }
    return \t[join $lines \n\t]
}
proc ::kettle::Bench::Collate_min {times} {
    ## Collation method "min": return the smallest of the recorded
    ## times.
    set best [lindex $times 0]
    foreach t [lrange $times 1 end] {
	# TODO: skip non-numeric times
	if {$t < $best} { set best $t }
    }
    return $best
}
proc ::kettle::Bench::Collate_max {times} {
    ## Collation method "max": return the largest of the recorded
    ## times.
    set best [lindex $times 0]
    foreach t [lrange $times 1 end] {
	# TODO: skip non-numeric times
	if {$t > $best} { set best $t }
    }
    return $best
}
proc ::kettle::Bench::Collate_avg {times} {
    ## Collation method "avg": return the arithmetic mean of the
    ## recorded times.
    ## BUGFIX: The counter was never initialized (the old "incr n"
    ## relied on Tcl 8.5+ auto-initialization), and an empty list
    ## caused a division-by-zero error. Use llength and guard the
    ## empty case.
    set n [llength $times]
    if {$n == 0} { return 0 }
    set total 0.0
    foreach v $times {
	# TODO: skip non-numeric times
	set total [expr {$total + $v}]
    }
    return [expr { $total / $n }]
}
proc ::kettle::Bench::ProcessLine {line} {
    ## Dispatch one line of sub-process output to the matcher
    ## procedures below. Each matcher inspects the variables "line"
    ## and "state" in this frame (via upvar); when one recognizes and
    ## consumes the line it forces this procedure to return
    ## ("return -code return"), so later matchers are not consulted.

    # Counters and other state in the calling environment.
    upvar 1 state state

    set line [string trimright $line]

    # Progress lines are too noisy for terminal/log echo; everything
    # else is echoed (terminal in full mode, plus the log stream).
    if {![string match {@@ Progress *} $line]} {
	stream term full $line
	stream to log {$line}
    }

    set rline $line
    set line [string trim $line]
    if {[string equal $line ""]} return

    # Recognize various parts written by the sub-shell and act on
    # them. If a line is recognized and acted upon the remaining
    # matchers are _not_ executed.
    Host;Platform;Cwd;Shell;Tcl
    Start;End;Benchmark
    Support;Benching
    CaptureStackStart
    CaptureStack
    BenchLog;BenchSkipped;BenchStart;BenchTrack;BenchResult
    Aborted
    AbortCause
    Misc

    # Unknown lines are simply shown (disturbing the animation, good
    # for this situation, actually), also saved for review.
    stream term compact !$line
    stream to unprocessed {$line}
    return
}
# # ## ### ##### ######## ############# #####################
proc ::kettle::Bench::InitState {} {
    ## Reset the benchmark run state in the caller's "state"
    ## variable to its pristine form.
    upvar 1 state state
    # The counters are all updated in ProcessLine.
    # The status may change to 'fail' in ProcessLine.
    set state [dict create \
	cerrors      0    \
	status       ok   \
	host         {}   \
	platform     {}   \
	cwd          {}   \
	shell        {}   \
	file         {}   \
	bench        {}   \
	start        {}   \
	times        {}   \
	results      {}   \
	suite/status ok   \
	cap/state    none \
	cap/stack    off]
    return
}
proc ::kettle::Bench::Host {} {
    ## Match "@@ Host <name>" lines and record the host name in the
    ## state. On a match, line processing ends (the caller returns).
    upvar 1 line line state state
    if {[regexp "^@@ Host (.*)$" $line -> host]} {
	# FUTURE: Write bench results to a storage back end for analysis.
	dict set state host $host
	return -code return
    }
    return
}
proc ::kettle::Bench::Platform {} {
    ## Match "@@ Platform <id>" lines and record the platform in the
    ## state. On a match, line processing ends (the caller returns).
    upvar 1 line line state state
    if {[regexp "^@@ Platform (.*)$" $line -> platform]} {
	dict set state platform $platform
	return -code return
    }
    return
}
proc ::kettle::Bench::Cwd {} {
    ## Match "@@ BenchCWD <path>" lines and record the working
    ## directory of the sub-process in the state. On a match, line
    ## processing ends (the caller returns).
    upvar 1 line line state state
    if {[regexp "^@@ BenchCWD (.*)$" $line -> cwd]} {
	dict set state cwd $cwd
	return -code return
    }
    return
}
proc ::kettle::Bench::Shell {} {
    ## Match "@@ Shell <path>" lines and record the executing shell
    ## in the state. On a match, line processing ends (the caller
    ## returns).
    upvar 1 line line state state
    if {[regexp "^@@ Shell (.*)$" $line -> shell]} {
	dict set state shell $shell
	return -code return
    }
    return
}
proc ::kettle::Bench::Tcl {} {
    ## Match "@@ Tcl <version>" lines; record the Tcl version in the
    ## state and show it in the activity stream. On a match, line
    ## processing ends (the caller returns).
    upvar 1 line line state state
    if {[regexp "^@@ Tcl (.*)$" $line -> tcl]} {
	dict set state tcl $tcl
	stream aextend "\[$tcl\] "
	return -code return
    }
    return
}
proc ::kettle::Bench::Misc {} {
    ## Swallow assorted known informational lines without acting on
    ## them, ending line processing for each.
    upvar 1 line line state state
    foreach pattern {
	{@@ BenchDir*}
	{@@ LocalDir*}
	{@@ Match*}
    } {
	if {[string match $pattern $line]} { return -code return }
    }
    return
}
proc ::kettle::Bench::Start {} {
    ## Match "@@ Start <epoch>" lines: remember the start time of
    ## the bench file and reset its per-file counters. On a match,
    ## line processing ends (the caller returns).
    upvar 1 line line state state
    if {[regexp "^@@ Start (.*)$" $line -> start]} {
	dict set state start     $start
	dict set state benchnum  0
	dict set state benchskip 0
	return -code return
    }
    return
}
proc ::kettle::Bench::End {} {
    ## Match "@@ End <epoch>" lines: a bench file finished. Closes
    ## the activity line with ok/error and skip counts, and records
    ## the file's timing tuple (count, total seconds, micro-
    ## seconds/benchmark) under the (shell, file) key.
    upvar 1 line line state state
    if {![regexp "^@@ End (.*)$" $line -> end]} return
    set start [dict get $state start]
    set shell [dict get $state shell]
    set file [dict get $state file]
    set num [dict get $state benchnum]
    set skip [dict get $state benchskip]
    # NOTE(review): cerrors is set once in InitState and never reset
    # per file, so a file following a failed one also closes with
    # ERR — confirm whether that is intended.
    set err [dict get $state cerrors]
    stream awrite "~~ $num SKIP $skip"
    if {$err} {
	stream aclose "~~ [io mred ERR] $num SKIP $skip"
    } else {
	stream aclose "~~ OK $num SKIP $skip"
    }
    #stream term compact "Started [clock format $start]"
    #stream term compact "End [clock format $end]"
    set delta [expr {$end - $start}]
    if {$num == 0} {
	# No benchmarks run; score is simply the elapsed time.
	set score $delta
    } else {
	# Get average number of microseconds per test.
	set score [expr {int(($delta/double($num))*1000000)}]
    }
    set key [list $shell $file]
    dict lappend state times $key [list $num $delta $score]
    stream to timings {[list TIME $key $num $delta $score]}
    return -code return
}
proc ::kettle::Bench::Benchmark {} {
    ## Match "@@ Benchmark <file>" lines: remember the bench file
    ## currently executing and show its padded short name in the
    ## activity stream. On a match, line processing ends.
    ## FIX: Dropped the unused "variable xfile" namespace link
    ## (dead code, referenced nowhere in this procedure).
    upvar 1 line line state state
    if {![regexp "^@@ Benchmark (.*)$" $line -> file]} return
    dict set state file $file
    # Map from the file's tail to its short name padded for vertical
    # alignment (see the fmap construction in Run).
    set padded [dict get $state fmap [file tail $file]]
    stream aextend "$padded "
    return -code return
}
proc ::kettle::Bench::Support {} {
    ## Recognize (and silently consume) lines reporting support
    ## packages loaded by the sub-process: "SYSTEM - <pkg>" and
    ## "LOCAL - <pkg>".
    upvar 1 line line state state
    foreach rx {
	{^SYSTEM - (.*)$}
	{^LOCAL - (.*)$}
    } {
	if {[regexp $rx $line -> package]} { return -code return }
    }
    return
}
proc ::kettle::Bench::Benching {} {
    ## Recognize (and silently consume) lines reporting the packages
    ## under benchmarking in the sub-process: "SYSTEM % <pkg>" and
    ## "LOCAL % <pkg>".
    upvar 1 line line state state
    foreach rx {
	{^SYSTEM % (.*)$}
	{^LOCAL % (.*)$}
    } {
	if {[regexp $rx $line -> package]} { return -code return }
    }
    return
}
proc ::kettle::Bench::BenchLog {} {
    ## Ignore unstructured feedback lines ("@@ Feedback ..."),
    ## consuming them so that later matchers do not see them.
    upvar 1 line line state state
    if {[string match {@@ Feedback *} $line]} {
	return -code return
    }
    return
}
proc ::kettle::Bench::BenchSkipped {} {
    ## Match "@@ Skipped {description ...}" lines: count the skip,
    ## clear the current benchmark, and note it in the activity
    ## stream. On a match, line processing ends.
    upvar 1 line line state state
    if {[regexp "^@@ Skipped (.*)$" $line -> payload]} {
	lassign [lindex $payload 0] desc
	dict incr state benchskip
	dict set  state bench {}
	stream awrite "SKIP $desc"
	return -code return
    }
    return
}
proc ::kettle::Bench::BenchStart {} {
    ## Match "@@ StartBench {description iterations}" lines: a
    ## benchmark begins. Records it, bumps the counter, and shows an
    ## empty progress box sized for the iteration count. On a match,
    ## line processing ends.
    upvar 1 line line state state
    if {[regexp "^@@ StartBench (.*)$" $line -> payload]} {
	lassign [lindex $payload 0] desc iterations
	dict set  state bench $desc
	dict incr state benchnum
	# Width of the iteration counter, for the progress display.
	set width [string length $iterations]
	dict set state witer $width
	dict set state iter  $iterations
	stream awrite "\[[format %${width}s {}]\] $desc"
	return -code return
    }
    return
}
proc ::kettle::Bench::BenchTrack {} {
    ## Match "@@ Progress {description at}" lines: update the
    ## iteration counter shown in the activity stream. On a match,
    ## line processing ends.
    upvar 1 line line state state
    if {[regexp "^@@ Progress (.*)$" $line -> payload]} {
	lassign [lindex $payload 0] desc at
	set width [dict get $state witer]
	stream awrite "\[[format %${width}s $at]\] $desc"
	return -code return
    }
    return
}
proc ::kettle::Bench::BenchResult {} {
    ## Match "@@ Result {description time}" lines: a benchmark
    ## completed. Appends the time to the per-(shell, version, file,
    ## description) result list — rounds merge into that list — and
    ## emits one csv row (including the round) to the results
    ## stream. On a match, line processing ends.
    upvar 1 line line state state
    if {![regexp "^@@ Result (.*)$" $line -> data]} return
    lassign [lindex $data 0] description time
    #stream awrite "$description = $time"
    set sh [dict get $state shell]
    set ver [dict get $state tcl]
    set file [dict get $state file]
    set round [dict get $state round]
    # csv row: shell, tcl version, bench file, round, description, time.
    set row [list $sh $ver $file $round $description $time]
    stream to results {"[join $row {","}]"}
    set key [list $sh $ver $file $description]
    dict update state results r {
	dict lappend r $key $time
    }
    # Clear the per-benchmark tracking data.
    dict set state bench {}
    dict set state witer {}
    return -code return
}
proc ::kettle::Bench::CaptureStackStart {} {
    ## A line starting with "@+" arms the stack-trace capture (see
    ## CaptureStack): clears the buffer, marks the suite as errored,
    ## counts the error, and flags the activity line. On a match,
    ## line processing ends.
    upvar 1 line line state state
    if {[string match {@+*} $line]} {
	dict set  state cap/stack on
	dict set  state stack {}
	dict set  state suite/status error
	dict incr state cerrors
	stream aextend "[io mred {Caught Error}] "
	return -code return
    }
    return
}
proc ::kettle::Bench::CaptureStack {} {
    ## While stack capture is armed (see CaptureStackStart),
    ## accumulate trace lines; the first line matching "@-*" ends
    ## the capture, saving the collected trace to the stacktrace
    ## stream when logging is active.
    upvar 1 state state
    if {![dict get $state cap/stack]} return
    upvar 1 line line
    if {![string match {@-*} $line]} {
	# Still inside the trace: strip the 2-character marker
	# prefix and accumulate. NOTE(review): assumes every trace
	# line carries a 2-char prefix — confirm against
	# benchmain.tcl's output format.
	dict append state stack [string range $line 2 end] \n
	return -code return
    }
    if {[stream active]} {
	stream aextend ([io mblue {Stacktrace saved}])
	set file [lindex [dict get $state file] end]
	set stack [dict get $state stack]
	stream to stacktrace {$file StackTrace}
	stream to stacktrace ========================================
	stream to stacktrace {$stack}
	stream to stacktrace ========================================\n\n
    } else {
	# No log streams; tell the user how to get the trace.
	stream aextend "([io mred {Stacktrace not saved}]. [io mblue {Use --log}])"
    }
    # Disarm the capture and drop the buffer.
    dict set state cap/stack off
    dict unset state stack
    stream aclose ""
    return -code return
}
proc ::kettle::Bench::Aborted {} {
    ## Match the "Aborting the benchmarks found ..." message emitted
    ## by the sub-process and flag the activity line. On a match,
    ## line processing ends.
    upvar 1 line line state state
    if {[string match {Aborting the benchmarks found *} $line]} {
	# Ignore aborted status if we already have it, or some other
	# error status (like error, or fail). These are more
	# important to show.
	if {[dict get $state suite/status] eq "ok"} {
	    dict set state suite/status aborted
	}
	stream aextend "[io mred Aborted:] "
	return -code return
    }
    return
}
proc ::kettle::Bench::AbortCause {} {
    ## Following an abort marker (see Aborted) the cause line
    ## ("Requir..." or "Error in ...") closes the activity line.
    ## On a match, line processing ends.
    upvar 1 line line state state
    if {[string match {Requir*} $line] ||
	[string match {Error in *} $line]} {
	stream aclose $line
	return -code return
    }
    return
}
# # ## ### ##### ######## ############# #####################
return