diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 831efa175..da74c3fea 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -244,7 +244,7 @@ jobs: sudo apt-get install -y binutils-riscv64-unknown-elf - name: Run tests - run: ./scripts/run_tests.py --verbose + run: ./scripts/run_tests.py --verbosity info - name: Check traces and profiles run: ./scripts/run_tests.py -t -p -c 1 TestCore diff --git a/coreblocks/core.py b/coreblocks/core.py index 46f0f6b91..585d79465 100644 --- a/coreblocks/core.py +++ b/coreblocks/core.py @@ -48,8 +48,8 @@ def __init__(self, *, gen_params: GenParams, wb_instr_bus: WishboneBus, wb_data_ self.wb_instr_bus = wb_instr_bus self.wb_data_bus = wb_data_bus - self.wb_master_instr = WishboneMaster(self.gen_params.wb_params) - self.wb_master_data = WishboneMaster(self.gen_params.wb_params) + self.wb_master_instr = WishboneMaster(self.gen_params.wb_params, "instr") + self.wb_master_data = WishboneMaster(self.gen_params.wb_params, "data") self.bus_master_instr_adapter = WishboneMasterAdapter(self.wb_master_instr) self.bus_master_data_adapter = WishboneMasterAdapter(self.wb_master_data) diff --git a/coreblocks/fu/jumpbranch.py b/coreblocks/fu/jumpbranch.py index ae2014e7b..8b4ba52c9 100644 --- a/coreblocks/fu/jumpbranch.py +++ b/coreblocks/fu/jumpbranch.py @@ -7,8 +7,8 @@ from transactron import * from transactron.core import def_method from transactron.lib import * +from transactron.lib import logging from transactron.utils import DependencyManager - from coreblocks.params import * from coreblocks.params.keys import AsyncInterruptInsertSignalKey, BranchVerifyKey from transactron.utils import OneHotSwitch @@ -19,6 +19,9 @@ __all__ = ["JumpBranchFuncUnit", "JumpComponent"] +log = logging.HardwareLogger("backend.fu.jumpbranch") + + class JumpBranchFn(DecoderManager): class Fn(IntFlag): JAL = auto() @@ -209,6 +212,14 @@ def _(arg): with m.If(~is_auipc): self.fifo_branch_resolved.write(m, from_pc=jb.in_pc, next_pc=jump_result, misprediction=misprediction) + log.debug( + m, + True, + "jumping from 0x{:08x} to 0x{:08x}; misprediction: {}", + jb.in_pc, + jump_result, + misprediction, + ) return m diff --git a/coreblocks/peripherals/wishbone.py b/coreblocks/peripherals/wishbone.py index 75f71522a..075a8b5e0 100644 --- a/coreblocks/peripherals/wishbone.py +++ b/coreblocks/peripherals/wishbone.py @@ -10,6 +10,7 @@ from transactron.utils import OneHotSwitchDynamic, assign, RoundRobin from transactron.lib.connectors import Forwarder from transactron.utils.transactron_helpers import make_layout +from transactron.lib import logging class WishboneParameters: @@ -114,6 +115,8 @@ class WishboneMaster(Elaboratable): ---------- wb_params: WishboneParameters Parameters for bus generation. + name: str, optional + Name of this bus. Used for logging. Attributes ---------- @@ -129,7 +132,8 @@ class WishboneMaster(Elaboratable): Returns state of request (error or success) and data (in case of read request) as `result_layout`. 
""" - def __init__(self, wb_params: WishboneParameters): + def __init__(self, wb_params: WishboneParameters, name: str = ""): + self.name = name self.wb_params = wb_params self.wb_layout = WishboneLayout(wb_params).wb_layout self.wb_master = Record(self.wb_layout) @@ -142,6 +146,11 @@ def __init__(self, wb_params: WishboneParameters): # latched input signals self.txn_req = Signal(self.method_layouts.request_layout) + logger_name = "bus.wishbone" + if name != "": + logger_name += f".{name}" + self.log = logging.HardwareLogger(logger_name) + def elaborate(self, platform): m = TModule() @@ -194,7 +203,17 @@ def FSMWBCycStart(request): # noqa: N802 @def_method(m, self.result) def _(): - return result.read(m) + ret = result.read(m) + + self.log.debug( + m, + True, + "response data=0x{:x} err={}", + ret.data, + ret.err, + ) + + return ret @def_method(m, self.request, ready=request_ready & result.write.ready) def _(arg): @@ -202,6 +221,16 @@ def _(arg): # do WBCycStart state in the same clock cycle FSMWBCycStart(arg) + self.log.debug( + m, + True, + "request addr=0x{:x} data=0x{:x} sel=0x{:x} write={}", + arg.addr, + arg.data, + arg.sel, + arg.we, + ) + result.write.schedule_before(self.request) result.read.schedule_before(self.request) diff --git a/pytest.ini b/pytest.ini index 142b00abe..970b444e5 100644 --- a/pytest.ini +++ b/pytest.ini @@ -6,3 +6,4 @@ norecursedirs = '*.egg', '.*', 'build', 'dist', 'venv', '__traces__', '__pycache filterwarnings = ignore:cannot collect test class 'TestbenchIO':pytest.PytestCollectionWarning ignore:No files were found in testpaths:pytest.PytestConfigWarning: +log_cli=true diff --git a/scripts/run_benchmarks.py b/scripts/run_benchmarks.py index 0dc25098a..e0bbcf90a 100755 --- a/scripts/run_benchmarks.py +++ b/scripts/run_benchmarks.py @@ -74,7 +74,10 @@ def run_benchmarks_with_cocotb(benchmarks: list[str], traces: bool) -> bool: return res.returncode == 0 -def run_benchmarks_with_pysim(benchmarks: list[str], traces: bool, verbose: bool) -> bool: +def run_benchmarks_with_pysim( + benchmarks: list[str], + traces: bool, +) -> bool: suite = unittest.TestSuite() def _gen_test(test_name: str): @@ -82,9 +85,7 @@ def test_fn(): traces_file = None if traces: traces_file = "benchmark." 
+ test_name - asyncio.run( - test.regression.benchmark.run_benchmark(PySimulation(verbose, traces_file=traces_file), test_name) - ) + asyncio.run(test.regression.benchmark.run_benchmark(PySimulation(traces_file=traces_file), test_name)) test_fn.__name__ = test_name test_fn.__qualname__ = test_name @@ -94,17 +95,17 @@ def test_fn(): for test_name in benchmarks: suite.addTest(unittest.FunctionTestCase(_gen_test(test_name))) - runner = unittest.TextTestRunner(verbosity=(2 if verbose else 1)) + runner = unittest.TextTestRunner(verbosity=2) result = runner.run(suite) return result.wasSuccessful() -def run_benchmarks(benchmarks: list[str], backend: Literal["pysim", "cocotb"], traces: bool, verbose: bool) -> bool: +def run_benchmarks(benchmarks: list[str], backend: Literal["pysim", "cocotb"], traces: bool) -> bool: if backend == "cocotb": return run_benchmarks_with_cocotb(benchmarks, traces) elif backend == "pysim": - return run_benchmarks_with_pysim(benchmarks, traces, verbose) + return run_benchmarks_with_pysim(benchmarks, traces) return False @@ -144,8 +145,9 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument("-l", "--list", action="store_true", help="List all benchmarks") parser.add_argument("-t", "--trace", action="store_true", help="Dump waveforms") + parser.add_argument("--log-level", default="WARNING", action="store", help="Level of messages to display.") + parser.add_argument("--log-filter", default=".*", action="store", help="Regexp used to filter out logs.") parser.add_argument("-p", "--profile", action="store_true", help="Write execution profiles") - parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output") parser.add_argument("-b", "--backend", default="cocotb", choices=["cocotb", "pysim"], help="Simulation backend") parser.add_argument( "-o", @@ -164,6 +166,9 @@ def main(): print(name) return + os.environ["__TRANSACTRON_LOG_LEVEL"] = args.log_level + os.environ["__TRANSACTRON_LOG_FILTER"] = args.log_filter + if args.benchmark_name: pattern = re.compile(args.benchmark_name) benchmarks = [name for name in benchmarks if pattern.search(name)] @@ -175,7 +180,7 @@ def main(): if args.profile: os.environ["__TRANSACTRON_PROFILE"] = "1" - success = run_benchmarks(benchmarks, args.backend, args.trace, args.verbose) + success = run_benchmarks(benchmarks, args.backend, args.trace) if not success: print("Benchmark execution failed") sys.exit(1) diff --git a/scripts/run_signature.py b/scripts/run_signature.py index b9b9c1701..2e047f9ae 100755 --- a/scripts/run_signature.py +++ b/scripts/run_signature.py @@ -45,32 +45,31 @@ def run_with_cocotb(test_name: str, traces: bool, output: str) -> bool: return os.path.isfile(output) # completed successfully if signature file was created -def run_with_pysim(test_name: str, traces: bool, verbose: bool, output: str) -> bool: +def run_with_pysim(test_name: str, traces: bool, output: str) -> bool: traces_file = None if traces: traces_file = os.path.basename(test_name) try: - asyncio.run( - test.regression.signature.run_test(PySimulation(verbose, traces_file=traces_file), test_name, output) - ) + asyncio.run(test.regression.signature.run_test(PySimulation(traces_file=traces_file), test_name, output)) except RuntimeError as e: print("RuntimeError:", e) return False return True -def run_test(test: str, backend: Literal["pysim", "cocotb"], traces: bool, verbose: bool, output: str) -> bool: +def run_test(test: str, backend: Literal["pysim", "cocotb"], traces: bool, output: str) -> bool: if backend == "cocotb": return 
run_with_cocotb(test, traces, output) elif backend == "pysim": - return run_with_pysim(test, traces, verbose, output) + return run_with_pysim(test, traces, output) return False def main(): parser = argparse.ArgumentParser() parser.add_argument("-t", "--trace", action="store_true", help="Dump waveforms") - parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output") + parser.add_argument("--log-level", default="WARNING", action="store", help="Level of messages to display.") + parser.add_argument("--log-filter", default=".*", action="store", help="Regexp used to filter out logs.") parser.add_argument("-b", "--backend", default="pysim", choices=["cocotb", "pysim"], help="Simulation backend") parser.add_argument("-o", "--output", default=None, help="Selects output file to write test signature to") parser.add_argument("path") @@ -79,7 +78,10 @@ def main(): output = args.output if args.output else args.path + ".signature" - success = run_test(args.path, args.backend, args.trace, args.verbose, output) + os.environ["__TRANSACTRON_LOG_LEVEL"] = args.log_level + os.environ["__TRANSACTRON_LOG_FILTER"] = args.log_filter + + success = run_test(args.path, args.backend, args.trace, output) if not success: print(f"{args.path}: Program execution failed") diff --git a/test/conftest.py b/test/conftest.py index d0a77f9ec..306a3df20 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -28,6 +28,7 @@ def pytest_addoption(parser: pytest.Parser): type=int, help="Number of tests to start. If less than number of all selected tests, then starts only subset of them.", ) + group.addoption("--coreblocks-log-filter", default=".*", action="store", help="Regexp used to filter out logs.") def generate_unittestname(item: pytest.Item) -> str: @@ -104,3 +105,6 @@ def pytest_runtest_setup(item: pytest.Item): if item.config.getoption("--coreblocks-profile", False): # type: ignore os.environ["__TRANSACTRON_PROFILE"] = "1" + + os.environ["__TRANSACTRON_LOG_FILTER"] = item.config.getoption("--coreblocks-log-filter", ".*") # type: ignore + os.environ["__TRANSACTRON_LOG_LEVEL"] = item.config.getoption("--log-level", "WARNING") # type: ignore diff --git a/test/regression/cocotb.py b/test/regression/cocotb.py index 87c043688..f6625861c 100644 --- a/test/regression/cocotb.py +++ b/test/regression/cocotb.py @@ -1,5 +1,6 @@ from decimal import Decimal import inspect +import re import os from typing import Any from collections.abc import Coroutine @@ -87,10 +88,6 @@ async def start(self): sig_s = WishboneSlaveSignals() if sig_m.we: - cocotb.logging.debug( - f"Wishbone bus '{self.name}' write request: " - f"addr=0x{addr:x} data=0x{int(sig_m.dat_w):x} sel={sig_m.sel}" - ) resp = self.model.write( WriteRequest( addr=addr, @@ -100,7 +97,6 @@ async def start(self): ) ) else: - cocotb.logging.debug(f"Wishbone bus '{self.name}' read request: addr=0x{addr:x} sel={sig_m.sel}") resp = self.model.read( ReadRequest( addr=addr, @@ -123,11 +119,6 @@ async def start(self): raise ValueError("Bus doesn't support rty") sig_s.rty = 1 - cocotb.logging.debug( - f"Wishbone bus '{self.name}' response: " - f"ack={sig_s.ack} err={sig_s.err} rty={sig_s.rty} data={int(sig_s.dat_r):x}" - ) - for _ in range(self.delay): await clock_edge_event # type: ignore @@ -148,6 +139,11 @@ def __init__(self, dut): self.gen_info = GenerationInfo.decode(gen_info_path) + self.log_level = os.environ["__TRANSACTRON_LOG_LEVEL"] + self.log_filter = os.environ["__TRANSACTRON_LOG_FILTER"] + + cocotb.logging.getLogger().setLevel(self.log_level) + def 
get_cocotb_handle(self, path_components: list[str]) -> ModifiableObject: obj = self.dut # Skip the first component, as it is already referenced in "self.dut" @@ -189,14 +185,40 @@ async def profile_handler(self, clock, profile: Profile): await clock_edge_event # type: ignore - async def assert_handler(self, clock): + async def logging_handler(self, clock): clock_edge_event = FallingEdge(clock) + log_level = cocotb.logging.getLogger().level + + logs = [ + (rec, self.get_cocotb_handle(rec.trigger_location)) + for rec in self.gen_info.logs + if rec.level >= log_level and re.search(self.log_filter, rec.logger_name) + ] + while True: - for assert_info in self.gen_info.asserts: - assert_val = self.get_cocotb_handle(assert_info.location) - n, i = assert_info.src_loc - assert assert_val.value, f"Assertion at {n}:{i}" + for rec, trigger_handle in logs: + if not trigger_handle.value: + continue + + values: list[int] = [] + for field in rec.fields_location: + values.append(int(self.get_cocotb_handle(field).value)) + + formatted_msg = rec.format(*values) + + cocotb_log = cocotb.logging.getLogger(rec.logger_name) + + cocotb_log.log( + rec.level, + "%s:%d] %s", + rec.location[0], + rec.location[1], + formatted_msg, + ) + + if rec.level >= cocotb.logging.ERROR: + assert False, f"Assertion failed at {rec.location[0], rec.location[1]}: {formatted_msg}" await clock_edge_event # type: ignore @@ -220,7 +242,7 @@ async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> S profile.transactions_and_methods = self.gen_info.profile_data.transactions_and_methods cocotb.start_soon(self.profile_handler(self.dut.clk, profile)) - cocotb.start_soon(self.assert_handler(self.dut.clk)) + cocotb.start_soon(self.logging_handler(self.dut.clk)) success = True try: @@ -237,7 +259,7 @@ async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> S for reg_name, reg_loc in metric_loc.regs.items(): value = int(self.get_cocotb_handle(reg_loc)) result.metric_values[metric_name][reg_name] = value - cocotb.logging.debug(f"Metric {metric_name}/{reg_name}={value}") + cocotb.logging.info(f"Metric {metric_name}/{reg_name}={value}") return result diff --git a/test/regression/cocotb/benchmark_entrypoint.py b/test/regression/cocotb/benchmark_entrypoint.py index fb3fa59c5..d700b3a4e 100644 --- a/test/regression/cocotb/benchmark_entrypoint.py +++ b/test/regression/cocotb/benchmark_entrypoint.py @@ -1,5 +1,4 @@ import sys -import cocotb from pathlib import Path top_dir = Path(__file__).parent.parent.parent.parent @@ -10,7 +9,6 @@ async def _do_benchmark(dut, benchmark_name): - cocotb.logging.getLogger().setLevel(cocotb.logging.INFO) await run_benchmark(CocotbSimulation(dut), benchmark_name) diff --git a/test/regression/cocotb/signature_entrypoint.py b/test/regression/cocotb/signature_entrypoint.py index 1508502fe..4b8a9d212 100644 --- a/test/regression/cocotb/signature_entrypoint.py +++ b/test/regression/cocotb/signature_entrypoint.py @@ -12,8 +12,6 @@ @cocotb.test() async def do_test(dut): - cocotb.logging.getLogger().setLevel(cocotb.logging.INFO) - test_name = os.environ["TESTNAME"] if test_name is None: raise RuntimeError("No ELF file provided") diff --git a/test/regression/cocotb/test_entrypoint.py b/test/regression/cocotb/test_entrypoint.py index 36879b2a6..71b0ed64f 100644 --- a/test/regression/cocotb/test_entrypoint.py +++ b/test/regression/cocotb/test_entrypoint.py @@ -1,5 +1,4 @@ import sys -import cocotb from pathlib import Path top_dir = Path(__file__).parent.parent.parent.parent @@ -14,7 +13,6 
@@ async def do_test(dut, test_name): - cocotb.logging.getLogger().setLevel(cocotb.logging.INFO) if test_name == empty_testcase_name: return await run_test(CocotbSimulation(dut), test_name) diff --git a/test/regression/pysim.py b/test/regression/pysim.py index 424d83d8e..0dc4c239d 100644 --- a/test/regression/pysim.py +++ b/test/regression/pysim.py @@ -1,5 +1,6 @@ import re import os +import logging from amaranth.sim import Passive, Settle from amaranth.utils import exact_log2 @@ -10,7 +11,14 @@ from .memory import * from .common import SimulationBackend, SimulationExecutionResult -from transactron.testing import PysimSimulator, TestGen, profiler_process, Profile +from transactron.testing import ( + PysimSimulator, + TestGen, + profiler_process, + Profile, + make_logging_process, + parse_logging_level, +) from transactron.utils.dependencies import DependencyContext, DependencyManager from transactron.lib.metrics import HardwareMetricsManager from ..peripherals.test_wishbone import WishboneInterfaceWrapper @@ -22,13 +30,15 @@ class PySimulation(SimulationBackend): - def __init__(self, verbose: bool, traces_file: Optional[str] = None): + def __init__(self, traces_file: Optional[str] = None): self.gp = GenParams(full_core_config) self.running = False self.cycle_cnt = 0 - self.verbose = verbose self.traces_file = traces_file + self.log_level = parse_logging_level(os.environ["__TRANSACTRON_LOG_LEVEL"]) + self.log_filter = os.environ["__TRANSACTRON_LOG_FILTER"] + self.metrics_manager = HardwareMetricsManager() def _wishbone_slave( @@ -49,17 +59,11 @@ def f(): resp_data = 0 - bus_name = "instr" if is_instr_bus else "data" - if (yield wb_ctrl.wb.we): - if self.verbose: - print(f"Wishbone '{bus_name}' bus write request: addr=0x{addr:x} data={dat_w:x} sel={sel:b}") resp = mem_model.write( WriteRequest(addr=addr, data=dat_w, byte_count=word_width_bytes, byte_sel=sel) ) else: - if self.verbose: - print(f"Wishbone '{bus_name}' bus read request: addr=0x{addr:x} sel={sel:b}") resp = mem_model.read( ReadRequest( addr=addr, @@ -70,9 +74,6 @@ def f(): ) resp_data = resp.data - if self.verbose: - print(f"Wishbone '{bus_name}' bus read response: data=0x{resp.data:x}") - ack = err = rty = 0 match resp.status: case ReplyStatus.OK: @@ -102,8 +103,7 @@ def f(): return f def pretty_dump_metrics(self, metric_values: dict[str, dict[str, int]], filter_regexp: str = ".*"): - print() - print("=== Core metrics dump ===") + str = "=== Core metrics dump ===\n" put_space_before = True for metric_name in sorted(metric_values.keys()): @@ -114,21 +114,23 @@ def pretty_dump_metrics(self, metric_values: dict[str, dict[str, int]], filter_r if metric.description != "": if not put_space_before: - print() + str += "\n" - print(f"# {metric.description}") + str += f"# {metric.description}\n" for reg in metric.regs.values(): reg_value = metric_values[metric_name][reg.name] desc = f" # {reg.description} [reg width={reg.width}]" - print(f"{metric_name}/{reg.name} {reg_value}{desc}") + str += f"{metric_name}/{reg.name} {reg_value}{desc}\n" put_space_before = False if metric.description != "": - print() + str += "\n" put_space_before = True + logging.info(str) + async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> SimulationExecutionResult: with DependencyContext(DependencyManager()): wb_instr_bus = WishboneBus(self.gp.wb_params) @@ -145,6 +147,11 @@ async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> S sim.add_sync_process(self._wishbone_slave(mem_model, wb_instr_ctrl, 
is_instr_bus=True)) sim.add_sync_process(self._wishbone_slave(mem_model, wb_data_ctrl, is_instr_bus=False)) + def on_error(): + raise RuntimeError("Simulation finished due to an error") + + sim.add_sync_process(make_logging_process(self.log_level, self.log_filter, on_error)) + profile = None if "__TRANSACTRON_PROFILE" in os.environ: transaction_manager = DependencyContext.get().get_dependency(TransactionManagerKey()) @@ -166,8 +173,7 @@ def on_sim_finish(): sim.add_sync_process(self._waiter(on_finish=on_sim_finish)) success = sim.run() - if self.verbose: - self.pretty_dump_metrics(metric_values) + self.pretty_dump_metrics(metric_values) return SimulationExecutionResult(success, metric_values, profile) diff --git a/test/regression/test_regression.py b/test/regression/test_regression.py index 53d1d2e95..88fc538f1 100644 --- a/test/regression/test_regression.py +++ b/test/regression/test_regression.py @@ -85,7 +85,7 @@ def regression_body_with_pysim(test_name: str, traces: bool): traces_file = None if traces: traces_file = REGRESSION_TESTS_PREFIX + test_name - asyncio.run(run_test(PySimulation(verbose=False, traces_file=traces_file), test_name)) + asyncio.run(run_test(PySimulation(traces_file=traces_file), test_name)) @pytest.fixture(scope="session") diff --git a/test/transactron/testing/test_assertion.py b/test/transactron/testing/test_assertion.py deleted file mode 100644 index 4becf3062..000000000 --- a/test/transactron/testing/test_assertion.py +++ /dev/null @@ -1,32 +0,0 @@ -from amaranth import * - -from transactron.utils import assertion -from transactron.testing import TestCaseWithSimulator - - -class AssertionTest(Elaboratable): - def __init__(self): - self.input = Signal() - self.output = Signal() - - def elaborate(self, platform): - m = Module() - - m.d.comb += self.output.eq(self.input & ~self.input) - - assertion(m, self.input == self.output) - - return m - - -class TestAssertion(TestCaseWithSimulator): - def test_assertion(self): - m = AssertionTest() - - def proc(): - yield - yield m.input.eq(1) - - with self.assertRaises(AssertionError): - with self.run_simulation(m) as sim: - sim.add_sync_process(proc) diff --git a/test/transactron/testing/test_log.py b/test/transactron/testing/test_log.py new file mode 100644 index 000000000..99040a4a7 --- /dev/null +++ b/test/transactron/testing/test_log.py @@ -0,0 +1,112 @@ +from unittest.mock import patch +from io import StringIO + +from amaranth import * + +from transactron import * +from transactron.testing import TestCaseWithSimulator +from transactron.lib import logging + +log = logging.HardwareLogger("test") + + +class LogTest(Elaboratable): + def __init__(self): + self.input = Signal(range(100)) + self.counter = Signal(range(200)) + + def elaborate(self, platform): + m = TModule() + + with m.If(self.input == 42): + log.warning(m, True, "Log triggered under Amaranth If value+3=0x{:x}", self.input + 3) + + log.warning(m, self.input[0] == 0, "Input is even! input={}, counter={}", self.input, self.counter) + + m.d.sync += self.counter.eq(self.counter + 1) + + return m + + +class ErrorLogTest(Elaboratable): + def __init__(self): + self.input = Signal() + self.output = Signal() + + def elaborate(self, platform): + m = TModule() + + m.d.comb += self.output.eq(self.input & ~self.input) + + log.error( + m, + self.input != self.output, + "Input is different than output! 
input=0x{:x} output=0x{:x}", + self.input, + self.output, + ) + + return m + + +class AssertionTest(Elaboratable): + def __init__(self): + self.input = Signal() + self.output = Signal() + + def elaborate(self, platform): + m = TModule() + + m.d.comb += self.output.eq(self.input & ~self.input) + + log.assertion(m, self.input == self.output, "Output differs") + + return m + + +class TestLog(TestCaseWithSimulator): + @patch("sys.stdout", new_callable=StringIO) + def test_log(self, stdout): + m = LogTest() + + def proc(): + for i in range(50): + yield + yield m.input.eq(i) + + with self.run_simulation(m) as sim: + sim.add_sync_process(proc) + + self.assertIn("Log triggered under Amaranth If value+3=0x2d", stdout.getvalue()) + for i in range(0, 50, 2): + self.assertIn(f"Input is even! input={i}, counter={i + 2}", stdout.getvalue()) + + @patch("sys.stdout", new_callable=StringIO) + def test_error_log(self, stdout): + m = ErrorLogTest() + + def proc(): + yield + yield m.input.eq(1) + + with self.assertRaises(AssertionError): + with self.run_simulation(m) as sim: + sim.add_sync_process(proc) + + expected_out = "Input is different than output! input=0x1 output=0x0" + self.assertIn(expected_out, stdout.getvalue()) + + @patch("sys.stdout", new_callable=StringIO) + def test_assertion(self, stdout): + m = AssertionTest() + + def proc(): + yield + yield m.input.eq(1) + + with self.assertRaises(AssertionError): + with self.run_simulation(m) as sim: + sim.add_sync_process(proc) + + expected_out = "Output differs" + self.assertIn(expected_out, stdout.getvalue()) diff --git a/transactron/lib/logging.py b/transactron/lib/logging.py new file mode 100644 index 000000000..7eb06deb1 --- /dev/null +++ b/transactron/lib/logging.py @@ -0,0 +1,229 @@ +import os +import re +import operator +import logging +from functools import reduce +from dataclasses import dataclass, field +from dataclasses_json import dataclass_json +from typing import TypeAlias + +from amaranth import * +from amaranth.tracer import get_src_loc + +from transactron.utils import SrcLoc +from transactron.utils._typing import ModuleLike, ValueLike +from transactron.utils.dependencies import DependencyContext, ListKey + +LogLevel: TypeAlias = int + + +@dataclass_json +@dataclass +class LogRecordInfo: + """Simulator-backend-agnostic information about a log record that can + be serialized and used outside the Amaranth context. + + Attributes + ---------- + logger_name: str + Name of the logger that emitted the record. + level: LogLevel + The severity level of the log. + format_str: str + The template of the message. Should follow the PEP 3101 standard. + location: SrcLoc + Source location of the log. + """ + + logger_name: str + level: LogLevel + format_str: str + location: SrcLoc + + def format(self, *args) -> str: + """Format the log message with a set of concrete arguments.""" + + return self.format_str.format(*args) + + +@dataclass +class LogRecord(LogRecordInfo): + """A LogRecord instance represents an event being logged. + + Attributes + ---------- + trigger: Signal + Amaranth signal triggering the log. + fields: list[Signal] + Amaranth signals that will be used to format the message. + """ + + trigger: Signal + fields: list[Signal] = field(default_factory=list) + + +@dataclass(frozen=True) +class LogKey(ListKey[LogRecord]): + pass + + +class HardwareLogger: + """A class for creating log messages in the hardware. + + Intuitively, the hardware logger works similarly to a normal software + logger. 
You can log a message anywhere in the circuit, but due to the + parallel nature of the hardware you must specify a special trigger signal + which will indicate if a message shall be reported in that cycle. + + Hardware logs are evaluated and printed during simulation, so both + the trigger and the format fields are Amaranth values, i.e. + signals or arbitrary Amaranth expressions. + + Instances of the HardwareLogger class represent a logger for a single + submodule of the circuit. Exactly how a "submodule" is defined is up + to the developer. Submodules are identified by a unique string and + the names can be nested. Names are organized into a namespace hierarchy + where levels are separated by periods, much like the Python package + namespace. So, for instance, submodule names might be "frontend" + for the upper level, and "frontend.icache" and "frontend.bpu" for + the sub-levels. There is no arbitrary limit to the depth of nesting. + + Attributes + ---------- + name: str + Name of this logger. + """ + + def __init__(self, name: str): + """ + Parameters + ---------- + name: str + Name of this logger. Hierarchy levels are separated by periods, + e.g. "backend.fu.jumpbranch". + """ + self.name = name + + def log(self, m: ModuleLike, level: LogLevel, trigger: ValueLike, format: str, *args, src_loc_at: int = 0): + """Registers a hardware log record with the given severity. + + Parameters + ---------- + m: ModuleLike + The module for which the log record is added. + level: LogLevel + The severity level of the record. + trigger: ValueLike + If the value of this Amaranth expression is true, the log will be reported. + format: str + The format of the message as defined in PEP 3101. + *args + Amaranth values that will be read during simulation and used to format + the message. + src_loc_at: int, optional + How many stack frames below to look for the source location, used to + identify the source of the log record. + """ + + def local_src_loc(src_loc: SrcLoc): + return (os.path.relpath(src_loc[0]), src_loc[1]) + + src_loc = local_src_loc(get_src_loc(src_loc_at + 1)) + + trigger_signal = Signal() + m.d.comb += trigger_signal.eq(trigger) + + record = LogRecord( + logger_name=self.name, level=level, format_str=format, location=src_loc, trigger=trigger_signal + ) + + for arg in args: + sig = Signal.like(arg) + m.d.top_comb += sig.eq(arg) + record.fields.append(sig) + + dependencies = DependencyContext.get() + dependencies.add_dependency(LogKey(), record) + + def debug(self, m: ModuleLike, trigger: ValueLike, format: str, *args, **kwargs): + """Log a message with severity 'DEBUG'. + + See `HardwareLogger.log` function for more details. + """ + self.log(m, logging.DEBUG, trigger, format, *args, **kwargs) + + def info(self, m: ModuleLike, trigger: ValueLike, format: str, *args, **kwargs): + """Log a message with severity 'INFO'. + + See `HardwareLogger.log` function for more details. + """ + self.log(m, logging.INFO, trigger, format, *args, **kwargs) + + def warning(self, m: ModuleLike, trigger: ValueLike, format: str, *args, **kwargs): + """Log a message with severity 'WARNING'. + + See `HardwareLogger.log` function for more details. + """ + self.log(m, logging.WARNING, trigger, format, *args, **kwargs) + + def error(self, m: ModuleLike, trigger: ValueLike, format: str, *args, **kwargs): + """Log a message with severity 'ERROR'. + + This severity level has special semantics. If a log with this severity + level is triggered, the simulation will be terminated. + + See `HardwareLogger.log` function for more details. 
+ """ + self.log(m, logging.ERROR, trigger, format, *args, **kwargs) + + def assertion(self, m: ModuleLike, value: Value, format: str = "", *args, src_loc_at: int = 0, **kwargs): + """Define an assertion. + + This function might help find some hardware bugs which might otherwise be + hard to detect. If `value` is false, the simulation will be terminated; + it can also be used to turn on a warning LED on a board. + + Internally, this is a convenience wrapper over log.error. + + See `HardwareLogger.log` function for more details. + """ + self.error(m, ~value, format, *args, **kwargs, src_loc_at=src_loc_at + 1) + + +def get_log_records(level: LogLevel, namespace_regexp: str = ".*") -> list[LogRecord]: + """Get log records for the given severity level and in the + specified namespace. + + This function returns all log records with severity greater than or equal + to the specified level and belonging to the specified namespace. + + Parameters + ---------- + level: LogLevel + The minimum severity level. + namespace_regexp: str, optional + The regexp of the namespace. If not specified, logs from all namespaces + will be processed. + """ + + dependencies = DependencyContext.get() + all_logs = dependencies.get_dependency(LogKey()) + return [rec for rec in all_logs if rec.level >= level and re.search(namespace_regexp, rec.logger_name)] + + +def get_trigger_bit(level: LogLevel, namespace_regexp: str = ".*") -> Value: + """Get a trigger bit for logs of the given severity level and + in the specified namespace. + + The signal returned by this function is high whenever the trigger signal + of any of the records with severity greater than or equal to the specified + level is high. + + Parameters + ---------- + level: LogLevel + The minimum severity level. + namespace_regexp: str, optional + The regexp of the namespace. If not specified, logs from all namespaces + will be processed. 
+ """ + + return reduce(operator.or_, [rec.trigger for rec in get_log_records(level, namespace_regexp)], C(0)) diff --git a/transactron/testing/__init__.py b/transactron/testing/__init__.py index 2ed73b0ff..bc5d38fa2 100644 --- a/transactron/testing/__init__.py +++ b/transactron/testing/__init__.py @@ -3,5 +3,5 @@ from .sugar import * # noqa: F401 from .testbenchio import * # noqa: F401 from .profiler import * # noqa: F401 -from .assertion import * # noqa: F401 +from .logging import * # noqa: F401 from transactron.utils import data_layout # noqa: F401 diff --git a/transactron/testing/assertion.py b/transactron/testing/assertion.py deleted file mode 100644 index 19c5a4149..000000000 --- a/transactron/testing/assertion.py +++ /dev/null @@ -1,20 +0,0 @@ -from collections.abc import Callable -from typing import Any -from amaranth.sim import Passive, Tick -from transactron.utils import assert_bit, assert_bits - - -__all__ = ["make_assert_handler"] - - -def make_assert_handler(my_assert: Callable[[int, str], Any]): - def assert_handler(): - yield Passive() - while True: - yield Tick("sync_neg") - if not (yield assert_bit()): - for v, (n, i) in assert_bits(): - my_assert((yield v), f"Assertion at {n}:{i}") - yield - - return assert_handler diff --git a/transactron/testing/infrastructure.py b/transactron/testing/infrastructure.py index dc4a5404e..a769bba13 100644 --- a/transactron/testing/infrastructure.py +++ b/transactron/testing/infrastructure.py @@ -13,7 +13,7 @@ from .testbenchio import TestbenchIO from .profiler import profiler_process, Profile from .functions import TestGen -from .assertion import make_assert_handler +from .logging import make_logging_process, parse_logging_level from .gtkw_extension import write_vcd_ext from transactron import Method from transactron.lib import AdapterTrans @@ -263,7 +263,12 @@ def run_simulation(self, module: HasElaborate, max_cycles: float = 10e4, add_tra profiler_process(sim.tested_module.manager.get_dependency(TransactionManagerKey()), profile) ) - sim.add_sync_process(make_assert_handler(self.assertTrue)) + def on_error(): + self.assertTrue(False, "Simulation finished due to an error") + + log_level = parse_logging_level(os.environ["__TRANSACTRON_LOG_LEVEL"]) + log_filter = os.environ["__TRANSACTRON_LOG_FILTER"] + sim.add_sync_process(make_logging_process(log_level, log_filter, on_error)) res = sim.run() diff --git a/transactron/testing/logging.py b/transactron/testing/logging.py new file mode 100644 index 000000000..6a2ad0881 --- /dev/null +++ b/transactron/testing/logging.py @@ -0,0 +1,108 @@ +from collections.abc import Callable +from typing import Any +import logging + +from amaranth.sim import Passive, Tick +from transactron.lib import logging as tlog + + +__all__ = ["make_logging_process", "parse_logging_level"] + + +def parse_logging_level(str: str) -> tlog.LogLevel: + """Parse the log level from a string. + + The level can be either a non-negative integer or a string representation + of one of the predefined levels. + + Raises an exception if the level cannot be parsed. + """ + str = str.upper() + names_mapping = logging.getLevelNamesMapping() + if str in names_mapping: + return names_mapping[str] + + # try convert to int + try: + return int(str) + except ValueError: + pass + + raise ValueError("Log level must be either {error, warn, info, debug} or a non-negative integer.") + + +_sim_cycle = 0 + + +class _LogFormatter(logging.Formatter): + """ + Log formatter to provide colors and to inject simulator times into + the log messages. 
Adapted from https://stackoverflow.com/a/56944256/3638629 + """ + + magenta = "\033[0;35m" + grey = "\033[0;34m" + blue = "\033[0;34m" + yellow = "\033[0;33m" + red = "\033[0;31m" + reset = "\033[0m" + + loglevel2colour = { + logging.DEBUG: grey + "{}" + reset, + logging.INFO: magenta + "{}" + reset, + logging.WARNING: yellow + "{}" + reset, + logging.ERROR: red + "{}" + reset, + } + + def format(self, record: logging.LogRecord): + level_name = self.loglevel2colour[record.levelno].format(record.levelname) + return f"{_sim_cycle} {level_name} {record.name} {record.getMessage()}" + + +def make_logging_process(level: tlog.LogLevel, namespace_regexp: str, on_error: Callable[[], Any]): + combined_trigger = tlog.get_trigger_bit(level, namespace_regexp) + records = tlog.get_log_records(level, namespace_regexp) + + root_logger = logging.getLogger() + ch = logging.StreamHandler() + formatter = _LogFormatter() + ch.setFormatter(formatter) + root_logger.handlers = [ch] + + def handle_logs(): + if not (yield combined_trigger): + return + + for record in records: + if not (yield record.trigger): + continue + + values: list[int] = [] + for field in record.fields: + values.append((yield field)) + + formatted_msg = record.format(*values) + + logger = root_logger.getChild(record.logger_name) + logger.log( + record.level, + "%s:%d] %s", + record.location[0], + record.location[1], + formatted_msg, + ) + + if record.level >= logging.ERROR: + on_error() + + def log_process(): + global _sim_cycle + + yield Passive() + while True: + yield Tick("sync_neg") + yield from handle_logs() + yield + _sim_cycle += 1 + + return log_process diff --git a/transactron/utils/__init__.py b/transactron/utils/__init__.py index 703b2aad9..ebf845b7d 100644 --- a/transactron/utils/__init__.py +++ b/transactron/utils/__init__.py @@ -4,7 +4,6 @@ from .assign import * # noqa: F401 from .amaranth_ext import * # noqa: F401 from .transactron_helpers import * # noqa: F401 -from .assertion import * # noqa: F401 from .dependencies import * # noqa: F401 from .depcache import * # noqa: F401 from .idgen import * # noqa: F401 diff --git a/transactron/utils/assertion.py b/transactron/utils/assertion.py deleted file mode 100644 index b79a74fef..000000000 --- a/transactron/utils/assertion.py +++ /dev/null @@ -1,60 +0,0 @@ -from amaranth import * -from amaranth.tracer import get_src_loc -from functools import reduce -import operator -from dataclasses import dataclass -from transactron.utils import SrcLoc -from transactron.utils._typing import ModuleLike, ValueLike -from transactron.utils.dependencies import DependencyContext, ListKey - -__all__ = ["AssertKey", "assertion", "assert_bit", "assert_bits"] - - -@dataclass(frozen=True) -class AssertKey(ListKey[tuple[Signal, SrcLoc]]): - pass - - -def assertion(m: ModuleLike, value: ValueLike, *, src_loc_at: int = 0): - """Define an assertion. - - This function might help find some hardware bugs which might otherwise be - hard to detect. If `value` is false on any assertion, the value returned - from the `assert_bit` function is false. This terminates the simulation, - it can also be used to turn on a warning LED on a board. - - Parameters - ---------- - m: Module - Module in which the assertion is defined. - value : Value - If the value of this Amaranth expression is false, the assertion will - fail. - src_loc_at : int, optional - How many stack frames below to look for the source location, used to - identify the failing assertion. 
- """ - src_loc = get_src_loc(src_loc_at) - sig = Signal() - m.d.comb += sig.eq(value) - dependencies = DependencyContext.get() - dependencies.add_dependency(AssertKey(), (sig, src_loc)) - - -def assert_bits() -> list[tuple[Signal, SrcLoc]]: - """Gets assertion bits. - - This function returns all the assertion signals created by `assertion`, - together with their source locations. - """ - dependencies = DependencyContext.get() - return dependencies.get_dependency(AssertKey()) - - -def assert_bit() -> Signal: - """Gets assertion bit. - - The signal returned by this function is false if and only if there exists - a false signal among assertion bits created by `assertion`. - """ - return reduce(operator.and_, [a[0] for a in assert_bits()], C(1)) diff --git a/transactron/utils/gen.py b/transactron/utils/gen.py index 2ff40dec2..df7ca11ea 100644 --- a/transactron/utils/gen.py +++ b/transactron/utils/gen.py @@ -1,5 +1,6 @@ from dataclasses import dataclass, field from dataclasses_json import dataclass_json +from typing import TypeAlias from amaranth import * from amaranth.back import verilog @@ -7,11 +8,10 @@ from transactron.core import TransactionManager, MethodMap, TransactionManagerKey from transactron.lib.metrics import HardwareMetricsManager +from transactron.lib import logging from transactron.utils.dependencies import DependencyContext from transactron.utils.idgen import IdGenerator from transactron.profiler import ProfileData -from transactron.utils._typing import SrcLoc -from transactron.utils.assertion import assert_bits from typing import TYPE_CHECKING @@ -21,11 +21,16 @@ __all__ = [ "MetricLocation", - "AssertLocation", + "GeneratedLog", "GenerationInfo", "generate_verilog", ] +SignalHandle: TypeAlias = list[str] +"""The location of a signal is a list of Verilog identifiers that denote a path +consisting of module names (and the signal name at the end) leading +to the signal wire.""" + @dataclass_json @dataclass @@ -34,13 +39,11 @@ class MetricLocation: Attributes ---------- - regs : dict[str, list[str]] - The location of each register of that metric. The location is a list of - Verilog identifiers that denote a path consiting of modules names - (and the signal name at the end) leading to the register wire. + regs : dict[str, SignalHandle] + The location of each register of that metric. """ - regs: dict[str, list[str]] = field(default_factory=dict) + regs: dict[str, SignalHandle] = field(default_factory=dict) @dataclass_json @@ -79,21 +82,19 @@ class MethodSignalsLocation: @dataclass_json @dataclass -class AssertLocation: - """Information about an assert signal in the generated Verilog code. +class GeneratedLog(logging.LogRecordInfo): + """Information about a log record in the generated Verilog code. Attributes ---------- - location : list[str] - The location of the assert signal. The location is a list of Verilog - identifiers that denote a path consisting of module names (and the - signal name at the end) leading to the signal wire. - src_loc : SrcLoc - Source location of the assertion. + trigger_location : SignalHandle + The location of the trigger signal. + fields_location : list[SignalHandle] + Locations of the log fields. """ - location: list[str] - src_loc: SrcLoc + trigger_location: SignalHandle + fields_location: list[SignalHandle] @dataclass_json @@ -106,16 +107,15 @@ class GenerationInfo: metrics_location : dict[str, MetricInfo] Mapping from a metric name to an object storing Verilog locations of its registers. 
- asserts : list[AssertLocation] - Locations and metadata for assertion signals. + logs : list[GeneratedLog] + Locations and metadata for all log records. """ metrics_location: dict[str, MetricLocation] transaction_signals_location: dict[int, TransactionSignalsLocation] method_signals_location: dict[int, MethodSignalsLocation] profile_data: ProfileData - metrics_location: dict[str, MetricLocation] - asserts: list[AssertLocation] + logs: list[GeneratedLog] def encode(self, file_name: str): """ @@ -158,7 +158,7 @@ def escape_verilog_identifier(identifier: str) -> str: return identifier -def get_signal_location(signal: Signal, name_map: "SignalDict") -> list[str]: +def get_signal_location(signal: Signal, name_map: "SignalDict") -> SignalHandle: raw_location = name_map[signal] # Amaranth escapes identifiers when generating Verilog code, but returns non-escaped identifiers @@ -207,13 +207,24 @@ def collect_transaction_method_signals( return (transaction_signals_location, method_signals_location) -def collect_asserts(name_map: "SignalDict") -> list[AssertLocation]: - asserts: list[AssertLocation] = [] - - for v, src_loc in assert_bits(): - asserts.append(AssertLocation(get_signal_location(v, name_map), src_loc)) +def collect_logs(name_map: "SignalDict") -> list[GeneratedLog]: + logs: list[GeneratedLog] = [] + + # Get all records. + for record in logging.get_log_records(0): + trigger_loc = get_signal_location(record.trigger, name_map) + fields_loc = [get_signal_location(field, name_map) for field in record.fields] + log = GeneratedLog( + logger_name=record.logger_name, + level=record.level, + format_str=record.format_str, + location=record.location, + trigger_location=trigger_loc, + fields_location=fields_loc, + ) + logs.append(log) - return asserts + return logs def generate_verilog( @@ -232,7 +243,7 @@ def generate_verilog( transaction_signals_location=transaction_signals, method_signals_location=method_signals, profile_data=profile_data, - asserts=collect_asserts(name_map), + logs=collect_logs(name_map), ) return verilog_text, gen_info
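
Usage sketch (illustrative only): the snippet below shows how the HardwareLogger API introduced in transactron/lib/logging.py is meant to be used from an Amaranth module. The module, signal, and logger names (FetchStub, pc, "frontend.fetch") are hypothetical, and the sketch assumes elaboration happens under a DependencyContext, as the testing infrastructure in this patch arranges.

    from amaranth import *

    from transactron import *
    from transactron.lib import logging

    # One logger per submodule; the dotted name is matched against the
    # --log-filter / __TRANSACTRON_LOG_FILTER regexp ("frontend.fetch" is a made-up name).
    log = logging.HardwareLogger("frontend.fetch")


    class FetchStub(Elaboratable):  # hypothetical module, for illustration only
        def __init__(self):
            self.pc = Signal(32)

        def elaborate(self, platform):
            m = TModule()

            # Reported in every cycle in which the trigger expression is true,
            # provided DEBUG passes the --log-level / __TRANSACTRON_LOG_LEVEL threshold.
            log.debug(m, self.pc[0:2] != 0, "misaligned fetch, pc=0x{:08x}", self.pc)

            # ERROR-level records (and log.assertion) terminate the simulation when triggered.
            log.assertion(m, self.pc[0:2] == 0, "pc must be word-aligned")

            m.d.sync += self.pc.eq(self.pc + 4)
            return m

During simulation, make_logging_process (pysim) or logging_handler (cocotb) reads the captured field values on each triggering cycle, formats the message, and prints it together with the simulation cycle and source location; scripts/run_benchmarks.py and scripts/run_signature.py expose the settings as --log-level and --log-filter, while pytest runs use --log-level and --coreblocks-log-filter.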