Skip to content

Commit

Permalink
Refactor & improvements of regression scripts (#578)
Browse files Browse the repository at this point in the history
  • Loading branch information
Jakub Urbańczyk authored Feb 1, 2024
1 parent 6fd151f commit 26c9d92
Show file tree
Hide file tree
Showing 12 changed files with 114 additions and 36 deletions.
54 changes: 46 additions & 8 deletions scripts/run_benchmarks.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,15 @@
import sys
import os
import subprocess
import tabulate
from typing import Literal
from pathlib import Path

topdir = Path(__file__).parent.parent
sys.path.insert(0, str(topdir))

import test.regression.benchmark # noqa: E402
from test.regression.benchmark import BenchmarkResult # noqa: E402
from test.regression.pysim import PySimulation # noqa: E402


Expand Down Expand Up @@ -58,6 +60,9 @@ def run_benchmarks_with_cocotb(benchmarks: list[str], traces: bool) -> bool:
test_cases = ",".join(benchmarks)
arglist += [f"TESTCASE={test_cases}"]

verilog_code = topdir.joinpath("core.v")
arglist += [f"VERILOG_SOURCES={verilog_code}"]

if traces:
arglist += ["TRACES=1"]

Expand Down Expand Up @@ -100,6 +105,38 @@ def run_benchmarks(benchmarks: list[str], backend: Literal["pysim", "cocotb"], t
return False


def build_result_table(results: dict[str, BenchmarkResult]) -> str:
    """Render benchmark results as a text table.

    The table is laid out with one column per benchmark and one row per
    measurement: cycles, instructions, IPC, followed by every core metric
    register (sorted by metric name). The set of metric rows is taken from
    the first result; all results are assumed to expose the same metrics.

    Parameters
    ----------
    results: dict[str, BenchmarkResult]
        Benchmark results keyed by testbench name.

    Returns
    -------
    str
        A ``simple_outline``-formatted table, or an empty string when
        there are no results.
    """
    if not results:
        return ""

    # Build the label column (the table's header row after transposition)
    # from the metrics of an arbitrary (first) result.
    first = next(iter(results.values()))
    label_column = ["Testbench name", "Cycles", "Instructions", "IPC"] + [
        f"{metric}/{reg}"
        for metric in sorted(first.metric_values)
        for reg in first.metric_values[metric]
    ]

    table_columns = [label_column]
    for bench_name, res in results.items():
        data_column = [bench_name, res.cycles, res.instr, res.instr / res.cycles]
        data_column += [
            res.metric_values[metric][reg]
            for metric in sorted(res.metric_values)
            for reg in res.metric_values[metric]
        ]
        table_columns.append(data_column)

    # tabulate wants row-major input, so flip the column-major layout;
    # the first resulting row (benchmark names) becomes the header.
    table_rows = list(zip(*table_columns))
    return tabulate.tabulate(table_rows, headers="firstrow", tablefmt="simple_outline")


def main():
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--list", action="store_true", help="List all benchmarks")
Expand Down Expand Up @@ -136,22 +173,23 @@ def main():
print("Benchmark execution failed")
sys.exit(1)

results = []
ipcs = []

results: dict[str, BenchmarkResult] = {}

for name in benchmarks:
with open(f"{str(test.regression.benchmark.results_dir)}/{name}.json", "r") as f:
res = json.load(f)
result = BenchmarkResult.from_json(f.read()) # type: ignore

ipc = res["instr"] / res["cycle"]
ipcs.append(ipc)
results[name] = result

results.append({"name": name, "unit": "Instructions Per Cycle", "value": ipc})
print(f"Benchmark '{name}': cycles={res['cycle']}, instructions={res['instr']} ipc={ipc:.4f}")
ipc = result.instr / result.cycles
ipcs.append({"name": name, "unit": "Instructions Per Cycle", "value": ipc})

print(f"Average ipc={sum(ipcs)/len(ipcs):.4f}")
print(build_result_table(results))

with open(args.output, "w") as benchmark_file:
json.dump(results, benchmark_file, indent=4)
json.dump(ipcs, benchmark_file, indent=4)


if __name__ == "__main__":
Expand Down
3 changes: 3 additions & 0 deletions scripts/run_signature.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@ def run_with_cocotb(test_name: str, traces: bool, output: str) -> bool:
arglist += [f"TESTNAME={test_name}"]
arglist += [f"OUTPUT={output}"]

verilog_code = f"{parent}/core.v"
arglist += [f"VERILOG_SOURCES={verilog_code}"]

if traces:
arglist += ["TRACES=1"]

Expand Down
3 changes: 3 additions & 0 deletions scripts/run_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,9 @@ def run_regressions_with_cocotb(tests: list[str], traces: bool) -> bool:
test_cases = ",".join(tests)
arglist += [f"TESTCASE={test_cases}"]

verilog_code = topdir.joinpath("core.v")
arglist += [f"VERILOG_SOURCES={verilog_code}"]

if traces:
arglist += ["TRACES=1"]

Expand Down
31 changes: 26 additions & 5 deletions test/regression/benchmark.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import os
import json
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from pathlib import Path

from .memory import *
Expand All @@ -10,6 +11,26 @@
results_dir = test_dir.joinpath("regression/benchmark_results")


@dataclass_json
@dataclass
class BenchmarkResult:
    """Result of running a single benchmark.

    Gains ``to_json``/``from_json`` (de)serialization from the
    ``dataclass_json`` decorator.

    Attributes
    ----------
    cycles: int
        Number of cycles the benchmark took.
    instr: int
        Count of instructions committed during the benchmark.
    metric_values: dict[str, dict[str, int]]
        Values of the core metrics taken at the end of the simulation,
        keyed by metric name and then by register name.
    """

    cycles: int
    instr: int
    metric_values: dict[str, dict[str, int]]


class MMIO(RandomAccessMemory):
"""Memory Mapped IO.
Expand Down Expand Up @@ -54,16 +75,16 @@ async def run_benchmark(sim_backend: SimulationBackend, benchmark_name: str):

mem_model = CoreMemoryModel(mem_segments)

success = await sim_backend.run(mem_model, timeout_cycles=2000000)
result = await sim_backend.run(mem_model, timeout_cycles=2000000)

if not success:
if not result.success:
raise RuntimeError("Simulation timed out")

if mmio.return_code() != 0:
raise RuntimeError("The benchmark exited with a non-zero return code: %d" % mmio.return_code())

results = {"cycle": mmio.cycle_cnt(), "instr": mmio.instr_cnt()}
bench_results = BenchmarkResult(cycles=mmio.cycle_cnt(), instr=mmio.instr_cnt(), metric_values=result.metric_values)

os.makedirs(str(results_dir), exist_ok=True)
with open(f"{str(results_dir)}/{benchmark_name}.json", "w") as outfile:
json.dump(results, outfile)
outfile.write(bench_results.to_json()) # type: ignore
13 changes: 9 additions & 4 deletions test/regression/cocotb.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,10 @@
from cocotb.handle import ModifiableObject
from cocotb.triggers import FallingEdge, Event, with_timeout
from cocotb_bus.bus import Bus
from cocotb.result import SimTimeoutError

from .memory import *
from .common import SimulationBackend
from .common import SimulationBackend, SimulationExecutionResult


@dataclass
Expand Down Expand Up @@ -136,7 +137,7 @@ def __init__(self, dut):
self.dut = dut
self.finish_event = Event()

async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> bool:
async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> SimulationExecutionResult:
clk = Clock(self.dut.clk, 1, "ns")
cocotb.start_soon(clk.start())

Expand All @@ -150,9 +151,13 @@ async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> b
data_wb = WishboneSlave(self.dut, "wb_data", self.dut.clk, mem_model, is_instr_bus=False)
cocotb.start_soon(data_wb.start())

res = await with_timeout(self.finish_event.wait(), timeout_cycles, "ns")
success = True
try:
await with_timeout(self.finish_event.wait(), timeout_cycles, "ns")
except SimTimeoutError:
success = False

return res is not None
return SimulationExecutionResult(success)

def stop(self):
self.finish_event.set()
Expand Down
3 changes: 0 additions & 3 deletions test/regression/cocotb/benchmark.Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,6 @@
SIM ?= verilator
TOPLEVEL_LANG ?= verilog

VERILOG_SOURCES += $(PWD)/../../../core.v
# use VHDL_SOURCES for VHDL files

# TOPLEVEL is the name of the toplevel module in your Verilog or VHDL file
TOPLEVEL = top

Expand Down
3 changes: 0 additions & 3 deletions test/regression/cocotb/signature.Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,6 @@
SIM ?= verilator
TOPLEVEL_LANG ?= verilog

VERILOG_SOURCES += $(PWD)/../../../core.v
# use VHDL_SOURCES for VHDL files

# TOPLEVEL is the name of the toplevel module in your Verilog or VHDL file
TOPLEVEL = top

Expand Down
3 changes: 0 additions & 3 deletions test/regression/cocotb/test.Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,6 @@
SIM ?= verilator
TOPLEVEL_LANG ?= verilog

VERILOG_SOURCES += $(PWD)/../../../core.v
# use VHDL_SOURCES for VHDL files

# TOPLEVEL is the name of the toplevel module in your Verilog or VHDL file
TOPLEVEL = top

Expand Down
21 changes: 19 additions & 2 deletions test/regression/common.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,28 @@
from abc import ABC, abstractmethod

from dataclasses import dataclass, field
from .memory import CoreMemoryModel


@dataclass
class SimulationExecutionResult:
    """Information about the result of the simulation.

    Attributes
    ----------
    success: bool
        Whether the simulation finished successfully, i.e. no timeouts,
        no exceptions, no failed assertions etc.
    metric_values: dict[str, dict[str, int]]
        Values of the core metrics taken at the end of the simulation,
        keyed by metric name and then by register name. Defaults to an
        empty dict for backends that collect no metrics.
    """

    success: bool
    metric_values: dict[str, dict[str, int]] = field(default_factory=dict)


class SimulationBackend(ABC):
@abstractmethod
async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int) -> bool:
async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int) -> SimulationExecutionResult:
raise NotImplementedError

@abstractmethod
Expand Down
8 changes: 4 additions & 4 deletions test/regression/pysim.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from amaranth.utils import log2_int

from .memory import *
from .common import SimulationBackend
from .common import SimulationBackend, SimulationExecutionResult

from ..common import SimpleTestCircuit, PysimSimulator
from ..peripherals.test_wishbone import WishboneInterfaceWrapper
Expand Down Expand Up @@ -89,7 +89,7 @@ def f():

return f

async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> bool:
async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> SimulationExecutionResult:
wb_instr_bus = WishboneBus(self.gp.wb_params)
wb_data_bus = WishboneBus(self.gp.wb_params)
core = Core(gen_params=self.gp, wb_instr_bus=wb_instr_bus, wb_data_bus=wb_data_bus)
Expand All @@ -106,12 +106,12 @@ async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> b
sim.add_sync_process(self._wishbone_slave(mem_model, wb_instr_ctrl, is_instr_bus=True))
sim.add_sync_process(self._wishbone_slave(mem_model, wb_data_ctrl, is_instr_bus=False))
sim.add_sync_process(self._waiter())
res = sim.run()
success = sim.run()

if self.verbose:
print(f"Simulation finished in {self.cycle_cnt} cycles")

return res
return SimulationExecutionResult(success)

def stop(self):
self.running = False
4 changes: 2 additions & 2 deletions test/regression/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,9 @@ async def run_test(sim_backend: SimulationBackend, test_name: str):

mem_model = CoreMemoryModel(mem_segments)

success = await sim_backend.run(mem_model, timeout_cycles=5000)
result = await sim_backend.run(mem_model, timeout_cycles=5000)

if not success:
if not result.success:
raise RuntimeError("Simulation timed out")

if mmio.failed_test:
Expand Down
4 changes: 2 additions & 2 deletions transactron/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -1370,15 +1370,15 @@ def __call__(
.. code-block:: python
m = Module()
with Transaction.body(m):
with Transaction().body(m):
ret = my_sum_method(m, arg1=2, arg2=3)
Alternative syntax:
.. highlight:: python
.. code-block:: python
with Transaction.body(m):
with Transaction().body(m):
ret = my_sum_method(m, {"arg1": 2, "arg2": 3})
"""
arg_rec = Record.like(self.data_in)
Expand Down

0 comments on commit 26c9d92

Please sign in to comment.