bench.py (forked from jrr6/browncs1260-final-benchmarks)

#!/usr/bin/env python3
"""Benchmark harness for the CSCI 1260 final homework.

Compiles each .lisp benchmark with a configured list of compiler passes,
runs the resulting binary N times, and writes the mean wall-clock time for
each (benchmark, configuration) pair as CSV.
"""
import argparse
import csv
import json
import os
import subprocess
import sys
import time

# Number of timed runs per benchmark; the reported figure is the mean.
N = 10


def input_file(benchmark):
    """Map a benchmark source file to its stdin file, e.g. foo.lisp -> foo.in."""
    pre, _ = os.path.splitext(benchmark)
    return pre + ".in"
def bench(passes: "list[str]"):
    """Compile every benchmark with the given passes, then time each binary."""
    pass_flags = " ".join("-p {}".format(p) for p in passes)
    output_dir = "compiled_benchmarks"
    benchmarks = [f for f in os.listdir("benchmarks") if f.endswith(".lisp")]
    # First, compile all benchmarks. The compiler runs from the parent
    # directory, so paths are rewritten relative to "..".
    for benchmark in benchmarks:
        print(f"compiling: {benchmark}")
        compile_cmd = "dune exec --root . bin/compile.exe -- {} {} {}".format(
            pass_flags,
            os.path.join(os.path.relpath("benchmarks", start=".."), benchmark),
            os.path.relpath(output_dir, start=".."),
        )
        subprocess.run(
            compile_cmd,
            shell=True,
            cwd="..",
            check=True,
        )
    # Then, run each benchmark N times and record the mean runtime.
    results = {}
    for benchmark in benchmarks:
        print(f"running: {benchmark}")
        runs = []
        # The compiled binary is named after the source file, e.g. foo.lisp.exe.
        run_cmd = os.path.join("compiled_benchmarks", "{}.exe".format(benchmark))
        # If a matching .in file exists, feed its contents to the benchmark
        # on stdin.
        stdin = ""
        input_path = os.path.join("benchmarks", input_file(benchmark))
        if os.path.exists(input_path):
            with open(input_path) as f:
                stdin = f.read()
        for _ in range(N):
            start = time.perf_counter()
            subprocess.run(
                run_cmd,
                input=stdin,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                universal_newlines=True,
                check=True,  # fail loudly rather than record a crashed run
            )
            end = time.perf_counter()
            # Store the elapsed time in nanoseconds.
            runs.append((end - start) * 10 ** 9)
        results[benchmark] = sum(runs) / len(runs)
    return results
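

# For reference, bench() returns a mapping from benchmark file name to mean
# runtime in nanoseconds. Illustrative shape only; these values are made up:
#   {"fib.lisp": 1.2e9, "sort.lisp": 3.4e8}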


def main():
    if not os.path.exists("benchmarks"):
        print(
            "Benchmark script should be run from the root of the benchmarks repository"
        )
        sys.exit(1)
    parser = argparse.ArgumentParser(
        description="CSCI 1260 final homework benchmark script"
    )
    parser.add_argument("--config", type=argparse.FileType("r"), default="config.json")
    parser.add_argument("--output", type=argparse.FileType("w"), default="results.csv")
    args = parser.parse_args()
    try:
        config = json.load(args.config)
    except json.decoder.JSONDecodeError as e:
        print("Error reading config file {}: {}".format(args.config.name, e))
        sys.exit(1)
    # Each config entry maps a configuration name to a list of compiler passes.
    results = {name: bench(passes) for name, passes in config.items()}
    # Write one CSV row per (benchmark, configuration) pair, grouped by
    # benchmark. All configurations share the same benchmark set, so any one
    # result dict supplies the benchmark names.
    writer = csv.writer(args.output)
    names = sorted(results.keys())
    benchmarks = sorted(results[names[0]].keys()) if names else []
    for benchmark in benchmarks:
        for name in names:
            writer.writerow((benchmark, name, results[name][benchmark]))


if __name__ == "__main__":
    main()
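
# Example setup (a sketch: the pass names below are hypothetical, since the
# available passes depend on the course compiler). A config.json mapping
# configuration names to pass lists might look like:
#
#   {
#     "baseline": [],
#     "optimized": ["some-pass", "another-pass"]
#   }
#
# Then, from the root of the benchmarks repository:
#
#   $ ./bench.py --config config.json --output results.csv
#
# results.csv will contain one (benchmark, configuration, mean-nanoseconds)
# row per pair.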