Skip to content

Commit

Permalink
Debug logs
Browse files Browse the repository at this point in the history
  • Loading branch information
Jacob Urbanczyk committed Mar 1, 2024
1 parent 5a7390c commit 0514051
Show file tree
Hide file tree
Showing 14 changed files with 326 additions and 45 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -247,7 +247,7 @@ jobs:
sudo apt-get install -y binutils-riscv64-unknown-elf
- name: Run tests
run: ./scripts/run_tests.py --verbose
run: ./scripts/run_tests.py --verbosity info

- name: Check traces and profiles
run: ./scripts/run_tests.py -t -p -c 1 TestCore
Expand Down
10 changes: 9 additions & 1 deletion coreblocks/fu/jumpbranch.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@
from transactron import *
from transactron.core import def_method
from transactron.lib import *
from transactron.lib import log
from transactron.utils import DependencyManager

from coreblocks.params import *
from coreblocks.params.keys import AsyncInterruptInsertSignalKey, BranchVerifyKey
from transactron.utils import OneHotSwitch
Expand Down Expand Up @@ -209,6 +209,14 @@ def _(arg):

with m.If(~is_auipc):
self.fifo_branch_resolved.write(m, from_pc=jb.in_pc, next_pc=jump_result, misprediction=misprediction)
log.debug(
m,
True,
"jumping from 0x{:08x} to 0x{:08x}; misprediction: {}",
jb.in_pc,
jump_result,
misprediction,
)

return m

Expand Down
20 changes: 12 additions & 8 deletions scripts/run_benchmarks.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
import test.regression.benchmark # noqa: E402
from test.regression.benchmark import BenchmarkResult # noqa: E402
from test.regression.pysim import PySimulation # noqa: E402
from transactron.lib import log # noqa: E402


def cd_to_topdir():
Expand Down Expand Up @@ -74,17 +75,15 @@ def run_benchmarks_with_cocotb(benchmarks: list[str], traces: bool) -> bool:
return res.returncode == 0


def run_benchmarks_with_pysim(benchmarks: list[str], traces: bool, log_level: log.LogLevel) -> bool:
    """Run the selected benchmarks on the Python-based simulator backend.

    Each benchmark is wrapped in a ``unittest.FunctionTestCase`` and the whole
    set is executed through a ``unittest.TextTestRunner``.

    Parameters
    ----------
    benchmarks: list[str]
        Names of the benchmarks to run.
    traces: bool
        If True, dump waveform traces to a file named ``benchmark.<name>``.
    log_level: log.LogLevel
        Logging verbosity; levels more verbose than WARNING also enable the
        verbose unittest runner output.

    Returns
    -------
    bool
        True when every benchmark completed successfully.
    """
    suite = unittest.TestSuite()

    def _gen_test(test_name: str):
        # Build a test function with a readable name so unittest reports
        # the benchmark name instead of a generic "test_fn".
        def test_fn():
            traces_file = None
            if traces:
                traces_file = "benchmark." + test_name
            asyncio.run(test.regression.benchmark.run_benchmark(PySimulation(traces_file=traces_file), test_name))

        test_fn.__name__ = test_name
        test_fn.__qualname__ = test_name

        return test_fn

    for test_name in benchmarks:
        suite.addTest(unittest.FunctionTestCase(_gen_test(test_name)))

    # Verbose runner output (verbosity=2) only when logging is chattier than WARNING.
    runner = unittest.TextTestRunner(verbosity=(2 if log_level < log.WARNING else 1))
    result = runner.run(suite)

    return result.wasSuccessful()


def run_benchmarks(
    benchmarks: list[str], backend: Literal["pysim", "cocotb"], traces: bool, log_level: log.LogLevel
) -> bool:
    """Dispatch benchmark execution to the selected simulation backend.

    Parameters
    ----------
    benchmarks: list[str]
        Names of the benchmarks to run.
    backend: Literal["pysim", "cocotb"]
        Which simulation backend to use.
    traces: bool
        If True, dump waveform traces.
    log_level: log.LogLevel
        Logging verbosity (used by the pysim backend only).

    Returns
    -------
    bool
        True on success; False for a failed run or an unrecognized backend.
    """
    if backend == "cocotb":
        return run_benchmarks_with_cocotb(benchmarks, traces)
    elif backend == "pysim":
        return run_benchmarks_with_pysim(benchmarks, traces, log_level)
    return False


Expand Down Expand Up @@ -144,7 +145,7 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--list", action="store_true", help="List all benchmarks")
parser.add_argument("-t", "--trace", action="store_true", help="Dump waveforms")
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
parser.add_argument("-v", "--verbosity", default="warn", action="store", help="Verbosity of the output logs")
parser.add_argument("-b", "--backend", default="cocotb", choices=["cocotb", "pysim"], help="Simulation backend")
parser.add_argument(
"-o",
Expand All @@ -163,6 +164,9 @@ def main():
print(name)
return

log_level = log.parse_level(args.verbosity)
os.environ["__TRANSACTRON_LOG_LEVEL"] = str(log_level)

if args.benchmark_name:
pattern = re.compile(args.benchmark_name)
benchmarks = [name for name in benchmarks if pattern.search(name)]
Expand Down
18 changes: 10 additions & 8 deletions scripts/run_signature.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@

import test.regression.signature # noqa: E402
from test.regression.pysim import PySimulation # noqa: E402
from transactron.lib import log # noqa: E402


def run_with_cocotb(test_name: str, traces: bool, output: str) -> bool:
Expand Down Expand Up @@ -45,32 +46,30 @@ def run_with_cocotb(test_name: str, traces: bool, output: str) -> bool:
return os.path.isfile(output) # completed successfully if signature file was created


def run_with_pysim(test_name: str, traces: bool, output: str) -> bool:
    """Run a single signature test on the Python-based simulator backend.

    Parameters
    ----------
    test_name: str
        Path of the test program to execute.
    traces: bool
        If True, dump waveform traces named after the test file's basename.
    output: str
        Path of the signature file to write.

    Returns
    -------
    bool
        False when the simulation raised a RuntimeError, True otherwise.
    """
    traces_file = None
    if traces:
        traces_file = os.path.basename(test_name)
    try:
        asyncio.run(test.regression.signature.run_test(PySimulation(traces_file=traces_file), test_name, output))
    except RuntimeError as e:
        # Report the failure but keep the caller's control flow boolean-based.
        print("RuntimeError:", e)
        return False
    return True


def run_test(test: str, backend: Literal["pysim", "cocotb"], traces: bool, output: str) -> bool:
    """Dispatch a signature test to the selected simulation backend.

    Parameters
    ----------
    test: str
        Path of the test program to execute.
    backend: Literal["pysim", "cocotb"]
        Which simulation backend to use.
    traces: bool
        If True, dump waveform traces.
    output: str
        Path of the signature file to write.

    Returns
    -------
    bool
        True on success; False for a failed run or an unrecognized backend.
    """
    if backend == "cocotb":
        return run_with_cocotb(test, traces, output)
    elif backend == "pysim":
        return run_with_pysim(test, traces, output)
    return False


def main():
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--trace", action="store_true", help="Dump waveforms")
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
parser.add_argument("-v", "--verbosity", default="warn", action="store", help="Verbosity of the output logs")
parser.add_argument("-b", "--backend", default="pysim", choices=["cocotb", "pysim"], help="Simulation backend")
parser.add_argument("-o", "--output", default=None, help="Selects output file to write test signature to")
parser.add_argument("path")
Expand All @@ -79,7 +78,10 @@ def main():

output = args.output if args.output else args.path + ".signature"

success = run_test(args.path, args.backend, args.trace, args.verbose, output)
log_level = log.parse_level(args.verbosity)
os.environ["__TRANSACTRON_LOG_LEVEL"] = str(log_level)

success = run_test(args.path, args.backend, args.trace, output)
if not success:
print(f"{args.path}: Program execution failed")

Expand Down
22 changes: 14 additions & 8 deletions scripts/run_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@

import test.regression.test # noqa: E402
from test.regression.pysim import PySimulation # noqa: E402
from transactron.lib import log # noqa: E402

REGRESSION_TESTS_PREFIX = "test.regression."

Expand Down Expand Up @@ -74,15 +75,15 @@ def run_regressions_with_cocotb(tests: list[str], traces: bool) -> bool:
return res.returncode == 0


def run_regressions_with_pysim(tests: list[str], traces: bool, log_level: log.LogLevel) -> bool:
    """Run the selected regression tests on the Python-based simulator backend.

    Each regression test is wrapped in a ``unittest.FunctionTestCase`` and the
    whole set is executed through a ``unittest.TextTestRunner``.

    Parameters
    ----------
    tests: list[str]
        Names of the regression tests to run (without the common prefix).
    traces: bool
        If True, dump waveform traces named ``REGRESSION_TESTS_PREFIX + name``.
    log_level: log.LogLevel
        Logging verbosity; levels more verbose than WARNING also enable the
        verbose unittest runner output.

    Returns
    -------
    bool
        True when every test completed successfully.
    """
    suite = unittest.TestSuite()

    def _gen_test(test_name: str):
        # Build a test function with a readable name so unittest reports
        # the regression test name instead of a generic "test_fn".
        def test_fn():
            traces_file = None
            if traces:
                traces_file = REGRESSION_TESTS_PREFIX + test_name
            asyncio.run(test.regression.test.run_test(PySimulation(traces_file=traces_file), test_name))

        test_fn.__name__ = test_name
        test_fn.__qualname__ = test_name

        return test_fn

    for test_name in tests:
        suite.addTest(unittest.FunctionTestCase(_gen_test(test_name)))

    # Verbose runner output (verbosity=2) only when logging is chattier than WARNING.
    runner = unittest.TextTestRunner(verbosity=(2 if log_level < log.WARNING else 1))
    result = runner.run(suite)

    return result.wasSuccessful()


def run_regression_tests(
    tests: list[str], backend: Literal["pysim", "cocotb"], traces: bool, log_level: log.LogLevel
) -> bool:
    """Dispatch regression test execution to the selected simulation backend.

    Parameters
    ----------
    tests: list[str]
        Names of the regression tests to run.
    backend: Literal["pysim", "cocotb"]
        Which simulation backend to use.
    traces: bool
        If True, dump waveform traces.
    log_level: log.LogLevel
        Logging verbosity (used by the pysim backend only).

    Returns
    -------
    bool
        True on success; False for a failed run or an unrecognized backend.
    """
    if backend == "cocotb":
        return run_regressions_with_cocotb(tests, traces)
    elif backend == "pysim":
        return run_regressions_with_pysim(tests, traces, log_level)
    return False


Expand All @@ -111,7 +114,7 @@ def main():
parser.add_argument("-l", "--list", action="store_true", help="List all tests")
parser.add_argument("-t", "--trace", action="store_true", help="Dump waveforms")
parser.add_argument("-p", "--profile", action="store_true", help="Write execution profiles")
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
parser.add_argument("-v", "--verbosity", default="warn", action="store", help="Verbosity of the output logs")
parser.add_argument("-a", "--all", action="store_true", default=False, help="Run all tests")
parser.add_argument(
"-b", "--backend", default="cocotb", choices=["cocotb", "pysim"], help="Simulation backend for regression tests"
Expand All @@ -131,6 +134,9 @@ def main():
print(REGRESSION_TESTS_PREFIX + name)
return

log_level = log.parse_level(args.verbosity)
os.environ["__TRANSACTRON_LOG_LEVEL"] = str(log_level)

if args.trace:
os.environ["__COREBLOCKS_DUMP_TRACES"] = "1"

Expand All @@ -148,13 +154,13 @@ def main():

unit_tests_success = True
if unit_tests:
runner = unittest.TextTestRunner(verbosity=(2 if args.verbose else 1))
runner = unittest.TextTestRunner(verbosity=(2 if log_level < log.WARNING else 1))
result = runner.run(unittest.TestSuite(list(unit_tests.values())[: args.count]))
unit_tests_success = result.wasSuccessful()

regression_tests_success = True
if regression_tests:
regression_tests_success = run_regression_tests(regression_tests, args.backend, args.trace, args.verbose)
regression_tests_success = run_regression_tests(regression_tests, args.backend, args.trace, log_level)

sys.exit(not (unit_tests_success and regression_tests_success))

Expand Down
5 changes: 4 additions & 1 deletion test/regression/cocotb.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,9 @@ def __init__(self, dut):

self.gen_info = GenerationInfo.decode(gen_info_path)

self.log_level = int(os.environ["__TRANSACTRON_LOG_LEVEL"])
cocotb.logging.getLogger().setLevel(self.log_level)

def get_cocotb_handle(self, path_components: list[str]) -> ModifiableObject:
obj = self.dut
# Skip the first component, as it is already referenced in "self.dut"
Expand Down Expand Up @@ -184,7 +187,7 @@ async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> S
for reg_name, reg_loc in metric_loc.regs.items():
value = int(self.get_cocotb_handle(reg_loc))
result.metric_values[metric_name][reg_name] = value
cocotb.logging.debug(f"Metric {metric_name}/{reg_name}={value}")
cocotb.logging.info(f"Metric {metric_name}/{reg_name}={value}")

return result

Expand Down
2 changes: 0 additions & 2 deletions test/regression/cocotb/benchmark_entrypoint.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import sys
import cocotb
from pathlib import Path

top_dir = Path(__file__).parent.parent.parent.parent
Expand All @@ -10,7 +9,6 @@


async def _do_benchmark(dut, benchmark_name):
cocotb.logging.getLogger().setLevel(cocotb.logging.INFO)
await run_benchmark(CocotbSimulation(dut), benchmark_name)


Expand Down
2 changes: 0 additions & 2 deletions test/regression/cocotb/signature_entrypoint.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,6 @@

@cocotb.test()
async def do_test(dut):
cocotb.logging.getLogger().setLevel(cocotb.logging.INFO)

test_name = os.environ["TESTNAME"]
if test_name is None:
raise RuntimeError("No ELF file provided")
Expand Down
2 changes: 0 additions & 2 deletions test/regression/cocotb/test_entrypoint.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import sys
import cocotb
from pathlib import Path

top_dir = Path(__file__).parent.parent.parent.parent
Expand All @@ -10,7 +9,6 @@


async def do_test(dut, test_name):
cocotb.logging.getLogger().setLevel(cocotb.logging.INFO)
await run_test(CocotbSimulation(dut), test_name)


Expand Down
24 changes: 12 additions & 12 deletions test/regression/pysim.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import re
import os

from amaranth.sim import Passive, Settle
from amaranth.utils import log2_int
Expand All @@ -8,6 +9,8 @@
from .common import SimulationBackend, SimulationExecutionResult

from transactron.testing import PysimSimulator, TestGen
from transactron.testing.log import *
from transactron.lib import log
from transactron.utils.dependencies import DependencyContext, DependencyManager
from transactron.lib.metrics import HardwareMetricsManager
from ..peripherals.test_wishbone import WishboneInterfaceWrapper
Expand All @@ -19,13 +22,14 @@


class PySimulation(SimulationBackend):
def __init__(self, traces_file: Optional[str] = None):
    """Initialize the Python-based simulation backend.

    Parameters
    ----------
    traces_file: Optional[str]
        Base name for waveform trace dumps; None disables tracing.
    """
    self.gp = GenParams(full_core_config)
    self.running = False
    self.cycle_cnt = 0
    self.traces_file = traces_file

    # The log level is communicated by the launcher scripts through an
    # environment variable, since this object is constructed deep inside the
    # test harness. NOTE(review): this raises KeyError when the variable is
    # unset — confirm every entry point sets __TRANSACTRON_LOG_LEVEL first.
    self.log_level = int(os.environ["__TRANSACTRON_LOG_LEVEL"])

    self.metrics_manager = HardwareMetricsManager()

def _wishbone_slave(
Expand All @@ -46,17 +50,11 @@ def f():

resp_data = 0

bus_name = "instr" if is_instr_bus else "data"

if (yield wb_ctrl.wb.we):
if self.verbose:
print(f"Wishbone '{bus_name}' bus write request: addr=0x{addr:x} data={dat_w:x} sel={sel:b}")
resp = mem_model.write(
WriteRequest(addr=addr, data=dat_w, byte_count=word_width_bytes, byte_sel=sel)
)
else:
if self.verbose:
print(f"Wishbone '{bus_name}' bus read request: addr=0x{addr:x} sel={sel:b}")
resp = mem_model.read(
ReadRequest(
addr=addr,
Expand All @@ -67,9 +65,6 @@ def f():
)
resp_data = resp.data

if self.verbose:
print(f"Wishbone '{bus_name}' bus read response: data=0x{resp.data:x}")

ack = err = rty = 0
match resp.status:
case ReplyStatus.OK:
Expand Down Expand Up @@ -142,6 +137,11 @@ async def run(self, mem_model: CoreMemoryModel, timeout_cycles: int = 5000) -> S
sim.add_sync_process(self._wishbone_slave(mem_model, wb_instr_ctrl, is_instr_bus=True))
sim.add_sync_process(self._wishbone_slave(mem_model, wb_data_ctrl, is_instr_bus=False))

def on_error():
raise RuntimeError("Simulation finished due to an error")

sim.add_sync_process(make_log_process(self.log_level, on_error))

metric_values: dict[str, dict[str, int]] = {}

def on_sim_finish():
Expand All @@ -157,7 +157,7 @@ def on_sim_finish():
sim.add_sync_process(self._waiter(on_finish=on_sim_finish))
success = sim.run()

if self.verbose:
if self.log_level <= log.INFO:
self.pretty_dump_metrics(metric_values)

return SimulationExecutionResult(success, metric_values)
Expand Down
Loading

0 comments on commit 0514051

Please sign in to comment.