diff --git a/scripts/run_benchmarks.py b/scripts/run_benchmarks.py
index 1a0c9a2b4..0214ecd27 100755
--- a/scripts/run_benchmarks.py
+++ b/scripts/run_benchmarks.py
@@ -8,6 +8,7 @@ import sys
 import os
 import subprocess
+import tabulate
 from typing import Literal
 from pathlib import Path
@@ -15,6 +16,7 @@ sys.path.insert(0, str(topdir))
 
 import test.regression.benchmark  # noqa: E402
+from test.regression.benchmark import BenchmarkResult  # noqa: E402
 from test.regression.pysim import PySimulation  # noqa: E402
@@ -58,6 +60,9 @@ def run_benchmarks_with_cocotb(benchmarks: list[str], traces: bool) -> bool:
         test_cases = ",".join(benchmarks)
         arglist += [f"TESTCASE={test_cases}"]
 
+    verilog_code = topdir.joinpath("core.v")
+    arglist += [f"VERILOG_SOURCES={verilog_code}"]
+
     if traces:
         arglist += ["TRACES=1"]
@@ -100,6 +105,38 @@ def run_benchmarks(benchmarks: list[str], backend: Literal["pysim", "cocotb"], t
     return False
 
 
+def build_result_table(results: dict[str, BenchmarkResult]) -> str:
+    if len(results) == 0:
+        return ""
+
+    header = ["Testbench name", "Cycles", "Instructions", "IPC"]
+
+    # First fetch all metric names to build the header
+    result = next(iter(results.values()))
+    for metric_name in sorted(result.metric_values.keys()):
+        regs = result.metric_values[metric_name]
+        for reg_name in regs:
+            header.append(f"{metric_name}/{reg_name}")
+
+    columns = [header]
+    for benchmark_name, result in results.items():
+        ipc = result.instr / result.cycles
+
+        column = [benchmark_name, result.cycles, result.instr, ipc]
+
+        for metric_name in sorted(result.metric_values.keys()):
+            regs = result.metric_values[metric_name]
+            for reg_name in regs:
+                column.append(regs[reg_name])
+
+        columns.append(column)
+
+    # Transpose the table, as the library expects to get a list of rows (and we have a list of columns).
+    rows = [list(i) for i in zip(*columns)]
+
+    return tabulate.tabulate(rows, headers="firstrow", tablefmt="simple_outline")
+
+
 def main():
     parser = argparse.ArgumentParser()
     parser.add_argument("-l", "--list", action="store_true", help="List all benchmarks")
@@ -136,22 +173,23 @@
         print("Benchmark execution failed")
         sys.exit(1)
 
-    results = []
     ipcs = []
+
+    results: dict[str, BenchmarkResult] = {}
+
     for name in benchmarks:
         with open(f"{str(test.regression.benchmark.results_dir)}/{name}.json", "r") as f:
-            res = json.load(f)
+            result = BenchmarkResult.from_json(f.read())  # type: ignore
 
-        ipc = res["instr"] / res["cycle"]
-        ipcs.append(ipc)
+        results[name] = result
 
-        results.append({"name": name, "unit": "Instructions Per Cycle", "value": ipc})
-        print(f"Benchmark '{name}': cycles={res['cycle']}, instructions={res['instr']} ipc={ipc:.4f}")
+        ipc = result.instr / result.cycles
+        ipcs.append({"name": name, "unit": "Instructions Per Cycle", "value": ipc})
 
-    print(f"Average ipc={sum(ipcs)/len(ipcs):.4f}")
+    print(build_result_table(results))
 
     with open(args.output, "w") as benchmark_file:
-        json.dump(results, benchmark_file, indent=4)
+        json.dump(ipcs, benchmark_file, indent=4)
 
 
 if __name__ == "__main__":
diff --git a/scripts/run_signature.py b/scripts/run_signature.py
index 7d45bae7f..6fccb031b 100755
--- a/scripts/run_signature.py
+++ b/scripts/run_signature.py
@@ -31,6 +31,9 @@ def run_with_cocotb(test_name: str, traces: bool, output: str) -> bool:
     arglist += [f"TESTNAME={test_name}"]
     arglist += [f"OUTPUT={output}"]
 
+    verilog_code = f"{parent}/core.v"
+    arglist += [f"VERILOG_SOURCES={verilog_code}"]
+
     if traces:
         arglist += ["TRACES=1"]
diff --git a/scripts/run_tests.py b/scripts/run_tests.py
index 264daa707..a3d4edd70 100755
--- a/scripts/run_tests.py
+++ b/scripts/run_tests.py
@@ -60,6 +60,9 @@ def run_regressions_with_cocotb(tests: list[str], traces: bool) -> bool:
         test_cases = ",".join(tests)
         arglist += [f"TESTCASE={test_cases}"]
 
+    verilog_code = topdir.joinpath("core.v")
+    arglist += [f"VERILOG_SOURCES={verilog_code}"]
+
     if traces:
         arglist += ["TRACES=1"]
diff --git a/test/regression/benchmark.py b/test/regression/benchmark.py
index 66492de44..8f9cb56c2 100644
--- a/test/regression/benchmark.py
+++ b/test/regression/benchmark.py
@@ -1,5 +1,6 @@
 import os
-import json
+from dataclasses import dataclass
+from dataclasses_json import dataclass_json
 from pathlib import Path
 
 from .memory import *
@@ -10,6 +11,26 @@
 
 results_dir = test_dir.joinpath("regression/benchmark_results")
 
 
+@dataclass_json
+@dataclass
+class BenchmarkResult:
+    """Result of running a single benchmark.
+
+    Attributes
+    ----------
+    cycles: int
+        The number of cycles the benchmark took.
+    instr: int
+        The number of instructions committed during the benchmark.
+    metric_values: dict[str, dict[str, int]]
+        Values of the core metrics taken at the end of the simulation.
+    """
+
+    cycles: int
+    instr: int
+    metric_values: dict[str, dict[str, int]]
+
+
 class MMIO(RandomAccessMemory):
     """Memory Mapped IO.
@@ -54,16 +75,16 @@ async def run_benchmark(sim_backend: SimulationBackend, benchmark_name: str):
     mem_model = CoreMemoryModel(mem_segments)
 
-    success = await sim_backend.run(mem_model, timeout_cycles=2000000)
+    result = await sim_backend.run(mem_model, timeout_cycles=2000000)
 
-    if not success:
+    if not result.success:
         raise RuntimeError("Simulation timed out")
 
     if mmio.return_code() != 0:
         raise RuntimeError("The benchmark exited with a non-zero return code: %d" % mmio.return_code())
 
-    results = {"cycle": mmio.cycle_cnt(), "instr": mmio.instr_cnt()}
+    bench_results = BenchmarkResult(cycles=mmio.cycle_cnt(), instr=mmio.instr_cnt(), metric_values=result.metric_values)
 
     os.makedirs(str(results_dir), exist_ok=True)
 
     with open(f"{str(results_dir)}/{benchmark_name}.json", "w") as outfile:
-        json.dump(results, outfile)
+        outfile.write(bench_results.to_json())  # type: ignore
diff --git a/test/regression/cocotb.py b/test/regression/cocotb.py
index e59bcef03..77cb0c93e 100644
--- a/test/regression/cocotb.py
+++ b/test/regression/cocotb.py
@@ -9,9 +9,10 @@
 from cocotb.handle import ModifiableObject
 from cocotb.triggers import FallingEdge, Event, with_timeout
 from cocotb_bus.bus import Bus
+from cocotb.result import SimTimeoutError
 
 from .memory import *
-from .common import SimulationBackend
+from .common import SimulationBackend, SimulationExecutionResult
 
 
 @dataclass
@@ -136,7 +137,7 @@ def __init__(self, dut):
         self.dut = dut
         self.finish_event = Event()
 
-    async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> bool:
+    async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> SimulationExecutionResult:
         clk = Clock(self.dut.clk, 1, "ns")
         cocotb.start_soon(clk.start())
 
@@ -150,9 +151,13 @@ async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> b
         data_wb = WishboneSlave(self.dut, "wb_data", self.dut.clk, mem_model, is_instr_bus=False)
         cocotb.start_soon(data_wb.start())
 
-        res = await with_timeout(self.finish_event.wait(), timeout_cycles, "ns")
+        success = True
+        try:
+            await with_timeout(self.finish_event.wait(), timeout_cycles, "ns")
+        except SimTimeoutError:
+            success = False
 
-        return res is not None
+        return SimulationExecutionResult(success)
 
     def stop(self):
         self.finish_event.set()
diff --git a/test/regression/cocotb/benchmark.Makefile b/test/regression/cocotb/benchmark.Makefile
index 015b79d6e..5c89d3785 100644
--- a/test/regression/cocotb/benchmark.Makefile
+++ b/test/regression/cocotb/benchmark.Makefile
@@ -4,9 +4,6 @@ SIM ?= verilator
 TOPLEVEL_LANG ?= verilog
 
-VERILOG_SOURCES += $(PWD)/../../../core.v
-# use VHDL_SOURCES for VHDL files
-
 # TOPLEVEL is the name of the toplevel module in your Verilog or VHDL file
 TOPLEVEL = top
diff --git a/test/regression/cocotb/signature.Makefile b/test/regression/cocotb/signature.Makefile
index e7da43e25..74b803083 100644
--- a/test/regression/cocotb/signature.Makefile
+++ b/test/regression/cocotb/signature.Makefile
@@ -4,9 +4,6 @@ SIM ?= verilator
 TOPLEVEL_LANG ?= verilog
 
-VERILOG_SOURCES += $(PWD)/../../../core.v
-# use VHDL_SOURCES for VHDL files
-
 # TOPLEVEL is the name of the toplevel module in your Verilog or VHDL file
 TOPLEVEL = top
diff --git a/test/regression/cocotb/test.Makefile b/test/regression/cocotb/test.Makefile
index e81e31804..bda120bc1 100644
--- a/test/regression/cocotb/test.Makefile
+++ b/test/regression/cocotb/test.Makefile
@@ -4,9 +4,6 @@ SIM ?= verilator
 TOPLEVEL_LANG ?= verilog
 
-VERILOG_SOURCES += $(PWD)/../../../core.v
-# use VHDL_SOURCES for VHDL files
-
 # TOPLEVEL is the name of the toplevel module in your Verilog or VHDL file
 TOPLEVEL = top
diff --git a/test/regression/common.py b/test/regression/common.py
index bb61e4613..9124f5711 100644
--- a/test/regression/common.py
+++ b/test/regression/common.py
@@ -1,11 +1,28 @@
 from abc import ABC, abstractmethod
-
+from dataclasses import dataclass, field
 from .memory import CoreMemoryModel
 
 
+@dataclass
+class SimulationExecutionResult:
+    """Information about the result of the simulation.
+
+    Attributes
+    ----------
+    success: bool
+        Whether the simulation finished successfully, i.e. no timeouts,
+        no exceptions, no failed assertions, etc.
+    metric_values: dict[str, dict[str, int]]
+        Values of the core metrics taken at the end of the simulation.
+    """
+
+    success: bool
+    metric_values: dict[str, dict[str, int]] = field(default_factory=dict)
+
+
 class SimulationBackend(ABC):
     @abstractmethod
-    async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int) -> bool:
+    async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int) -> SimulationExecutionResult:
         raise NotImplementedError
 
     @abstractmethod
diff --git a/test/regression/pysim.py b/test/regression/pysim.py
index aedf32f60..99c57e5d9 100644
--- a/test/regression/pysim.py
+++ b/test/regression/pysim.py
@@ -2,7 +2,7 @@ from amaranth.utils import log2_int
 
 from .memory import *
-from .common import SimulationBackend
+from .common import SimulationBackend, SimulationExecutionResult
 from ..common import SimpleTestCircuit, PysimSimulator
 from ..peripherals.test_wishbone import WishboneInterfaceWrapper
 
@@ -89,7 +89,7 @@ def f():
 
         return f
 
-    async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> bool:
+    async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> SimulationExecutionResult:
         wb_instr_bus = WishboneBus(self.gp.wb_params)
         wb_data_bus = WishboneBus(self.gp.wb_params)
         core = Core(gen_params=self.gp, wb_instr_bus=wb_instr_bus, wb_data_bus=wb_data_bus)
@@ -106,12 +106,12 @@ async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> b
         sim.add_sync_process(self._wishbone_slave(mem_model, wb_instr_ctrl, is_instr_bus=True))
         sim.add_sync_process(self._wishbone_slave(mem_model, wb_data_ctrl, is_instr_bus=False))
         sim.add_sync_process(self._waiter())
-        res = sim.run()
+        success = sim.run()
 
         if self.verbose:
             print(f"Simulation finished in {self.cycle_cnt} cycles")
 
-        return res
+        return SimulationExecutionResult(success)
 
     def stop(self):
         self.running = False
diff --git a/test/regression/test.py b/test/regression/test.py
index cbe8067cd..daf0ff3b7 100644
--- a/test/regression/test.py
+++ b/test/regression/test.py
@@ -42,9 +42,9 @@ async def run_test(sim_backend: SimulationBackend, test_name: str):
 
     mem_model = CoreMemoryModel(mem_segments)
 
-    success = await sim_backend.run(mem_model, timeout_cycles=5000)
+    result = await sim_backend.run(mem_model, timeout_cycles=5000)
 
-    if not success:
+    if not result.success:
         raise RuntimeError("Simulation timed out")
 
     if mmio.failed_test:
diff --git a/transactron/core.py b/transactron/core.py
index 0fd6fc29e..99c4e6734 100644
--- a/transactron/core.py
+++ b/transactron/core.py
@@ -1370,7 +1370,7 @@ def __call__(
         .. code-block:: python
 
             m = Module()
-            with Transaction.body(m):
+            with Transaction().body(m):
                 ret = my_sum_method(m, arg1=2, arg2=3)
 
         Alternative syntax:
@@ -1378,7 +1378,7 @@ def __call__(
         .. highlight:: python
         .. code-block:: python
 
-            with Transaction.body(m):
+            with Transaction().body(m):
                 ret = my_sum_method(m, {"arg1": 2, "arg2": 3})
         """
         arg_rec = Record.like(self.data_in)
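
Reviewer note: below is a minimal, self-contained sketch (not part of the patch) of how the pieces introduced above fit together: the dataclasses_json round-trip that run_benchmark() and run_benchmarks.py now perform, and the column-wise table layout of build_result_table(). The benchmark name "example", the metric name "fetch.stalls" and all numbers are made up for illustration; it assumes the tabulate and dataclasses-json packages are installed.

    from dataclasses import dataclass

    import tabulate
    from dataclasses_json import dataclass_json


    @dataclass_json
    @dataclass
    class BenchmarkResult:
        cycles: int
        instr: int
        metric_values: dict[str, dict[str, int]]


    # run_benchmark() serializes one such object per benchmark to a JSON file...
    saved = BenchmarkResult(
        cycles=2000, instr=1500, metric_values={"fetch.stalls": {"count": 37}}
    ).to_json()

    # ...and run_benchmarks.py reads it back.
    result = BenchmarkResult.from_json(saved)

    # build_result_table() gathers one column per benchmark (the header column first),
    # transposes the list of columns into rows, and lets tabulate render them,
    # so each benchmark becomes a column of the printed table.
    header = ["Testbench name", "Cycles", "Instructions", "IPC", "fetch.stalls/count"]
    column = [
        "example",
        result.cycles,
        result.instr,
        result.instr / result.cycles,
        result.metric_values["fetch.stalls"]["count"],
    ]
    columns = [header, column]
    rows = [list(i) for i in zip(*columns)]
    print(tabulate.tabulate(rows, headers="firstrow", tablefmt="simple_outline"))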