diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index e62f91115..831efa175 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -220,9 +220,6 @@ jobs:
           . venv/bin/activate
           scripts/run_tests.py -a regression
 
-      - name: Check for test failure
-        run: ./scripts/check_test_results.py
-
   unit-test:
     name: Run unit tests
     runs-on: ubuntu-latest
diff --git a/.gitignore b/.gitignore
index 8a817bc60..c40fe28de 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,9 @@ venv.bak/
 # Tests outputs
 test/__traces__
 test/__profiles__/*.json
+pytestdebug.log
+_coreblocks_regression.lock
+_coreblocks_regression.counter
 
 # cocotb build
 /test/regression/cocotb/build
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 000000000..142b00abe
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,8 @@
+[pytest]
+minversion = 7.2.2
+testpaths =
+    tests
+norecursedirs = '*.egg', '.*', 'build', 'dist', 'venv', '__traces__', '__pycache__'
+filterwarnings =
+    ignore:cannot collect test class 'TestbenchIO':pytest.PytestCollectionWarning
+    ignore:No files were found in testpaths:pytest.PytestConfigWarning:
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 0111c536e..1d9530305 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -13,8 +13,10 @@ pyright==1.1.332
 Sphinx==5.1.1
 sphinx-rtd-theme==1.0.0
 sphinxcontrib-mermaid==0.8.1
-cocotb==1.7.2
+cocotb==1.8.1
 cocotb-bus==0.2.1
-pytest==7.2.2
+pytest==8.0.0
+pytest-xdist==3.5.0
 pyelftools==0.29
 tabulate==0.9.0
+filelock==3.13.1
diff --git a/scripts/check_test_results.py b/scripts/check_test_results.py
deleted file mode 100755
index c10af9bc2..000000000
--- a/scripts/check_test_results.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python3
-
-import sys
-import os
-import pathlib
-import xml.etree.ElementTree as eT
-
-FAILURE_TAG = "failure"
-TOP_DIR = pathlib.Path(__file__).parent.parent
-TEST_RESULTS_FILE = TOP_DIR.joinpath("test/regression/cocotb/results.xml")
-
-if not os.path.exists(TEST_RESULTS_FILE):
-    print("File not found: ", TEST_RESULTS_FILE)
-    sys.exit(1)
-
-tree = eT.parse(TEST_RESULTS_FILE)
-
-if len(list(tree.iter(FAILURE_TAG))) > 0:
-    print("Some regression tests failed")
-    sys.exit(1)
-
-print("All regression tests pass")
diff --git a/scripts/run_tests.py b/scripts/run_tests.py
index 9923dd5e0..d75cb1e53 100755
--- a/scripts/run_tests.py
+++ b/scripts/run_tests.py
@@ -1,109 +1,15 @@
 #!/usr/bin/env python3
 
-import unittest
-import asyncio
+import pytest
 import argparse
-import re
-import sys
 import os
-import subprocess
-from typing import Literal
 from pathlib import Path
 
 topdir = Path(__file__).parent.parent
-sys.path.insert(0, str(topdir))
-
-import test.regression.test  # noqa: E402
-from test.regression.pysim import PySimulation  # noqa: E402
-
-REGRESSION_TESTS_PREFIX = "test.regression."
-
-
-def cd_to_topdir():
-    os.chdir(str(topdir))
-
-
-def load_unit_tests():
-    suite = unittest.TestLoader().discover(".")
-
-    tests = {}
-
-    def flatten(suite):
-        if hasattr(suite, "__iter__"):
-            for x in suite:
-                flatten(x)
-        else:
-            tests[suite.id()] = suite
-
-    flatten(suite)
-
-    return tests
-
-
-def load_regression_tests() -> list[str]:
-    all_tests = test.regression.test.get_all_test_names()
-    if len(all_tests) == 0:
-        res = subprocess.run(["make", "-C", "test/external/riscv-tests"])
-        if res.returncode != 0:
-            print("Couldn't build regression tests")
-            sys.exit(1)
-
-    exclude = {"rv32ui-ma_data", "rv32ui-fence_i"}
-
-    return list(all_tests - exclude)
-
-
-def run_regressions_with_cocotb(tests: list[str], traces: bool) -> bool:
-    cpu_count = len(os.sched_getaffinity(0))
-    arglist = ["make", "-C", "test/regression/cocotb", "-f", "test.Makefile", f"-j{cpu_count}"]
-
-    test_cases = ",".join(tests)
-    arglist += [f"TESTCASE={test_cases}"]
-
-    verilog_code = topdir.joinpath("core.v")
-    gen_info_path = f"{verilog_code}.json"
-
-    arglist += [f"VERILOG_SOURCES={verilog_code}"]
-    arglist += [f"_COREBLOCKS_GEN_INFO={gen_info_path}"]
-
-    if traces:
-        arglist += ["TRACES=1"]
-
-    res = subprocess.run(arglist)
-
-    return res.returncode == 0
-
-
-def run_regressions_with_pysim(tests: list[str], traces: bool, verbose: bool) -> bool:
-    suite = unittest.TestSuite()
-
-    def _gen_test(test_name: str):
-        def test_fn():
-            traces_file = None
-            if traces:
-                traces_file = REGRESSION_TESTS_PREFIX + test_name
-            asyncio.run(test.regression.test.run_test(PySimulation(verbose, traces_file=traces_file), test_name))
-
-        test_fn.__name__ = test_name
-        test_fn.__qualname__ = test_name
-
-        return test_fn
-
-    for test_name in tests:
-        suite.addTest(unittest.FunctionTestCase(_gen_test(test_name)))
-
-    runner = unittest.TextTestRunner(verbosity=(2 if verbose else 1))
-    result = runner.run(suite)
-
-    return result.wasSuccessful()
-
-
-def run_regression_tests(tests: list[str], backend: Literal["pysim", "cocotb"], traces: bool, verbose: bool) -> bool:
-    if backend == "cocotb":
-        return run_regressions_with_cocotb(tests, traces)
-    elif backend == "pysim":
-        return run_regressions_with_pysim(tests, traces, verbose)
-    return False
+os.chdir(topdir)
 
 
 def main():
@@ -117,46 +23,41 @@ def main():
         "-b", "--backend", default="cocotb", choices=["cocotb", "pysim"], help="Simulation backend for regression tests"
     )
     parser.add_argument("-c", "--count", type=int, help="Start `c` first tests which match regexp")
+    parser.add_argument(
+        "-j", "--jobs", type=int, default=len(os.sched_getaffinity(0)), help="Start `j` jobs in parallel. Default: all"
+    )
     parser.add_argument("test_name", nargs="?")
     args = parser.parse_args()
 
-    unit_tests = load_unit_tests()
-    regression_tests = load_regression_tests() if args.all else []
-
-    if args.list:
-        for name in list(unit_tests.keys()):
-            print(name)
-        for name in regression_tests:
-            print(REGRESSION_TESTS_PREFIX + name)
-        return
+    pytest_arguments = ["--max-worker-restart=1"]
 
     if args.trace:
         os.environ["__COREBLOCKS_DUMP_TRACES"] = "1"
+        pytest_arguments.append("--coreblocks-traces")
 
     if args.profile:
        os.environ["__TRANSACTRON_PROFILE"] = "1"
 
     if args.test_name:
-        pattern = re.compile(args.test_name)
-        unit_tests = {name: test for name, test in unit_tests.items() if pattern.search(name)}
-        regression_tests = [test for test in regression_tests if pattern.search(REGRESSION_TESTS_PREFIX + test)]
-
-        if not unit_tests and not regression_tests:
-            print(f"Could not find test matching '{args.test_name}'")
-            sys.exit(1)
-
-    unit_tests_success = True
-    if unit_tests:
-        runner = unittest.TextTestRunner(verbosity=(2 if args.verbose else 1))
-        result = runner.run(unittest.TestSuite(list(unit_tests.values())[: args.count]))
-        unit_tests_success = result.wasSuccessful()
-
-    regression_tests_success = True
-    if regression_tests:
-        regression_tests_success = run_regression_tests(regression_tests, args.backend, args.trace, args.verbose)
-
-    sys.exit(not (unit_tests_success and regression_tests_success))
+        pytest_arguments += [f"--coreblocks-test-name={args.test_name}"]
+    if args.count:
+        pytest_arguments += ["--coreblocks-test-count", str(args.count)]
+    if args.list:
+        pytest_arguments.append("--coreblocks-list")
+    if args.jobs and not args.list:
+        # To list tests we cannot use xdist, because it doesn't support forwarding of stdout from workers.
+        pytest_arguments += ["-n", str(args.jobs)]
+    if args.all:
+        pytest_arguments.append("--coreblocks-regression")
+    if args.verbose:
+        pytest_arguments.append("--verbose")
+    if args.backend:
+        pytest_arguments += [f"--coreblocks-backend={args.backend}"]
+
+    ret = pytest.main(pytest_arguments, [])
+
+    exit(ret)
 
 
 if __name__ == "__main__":
diff --git a/test/conftest.py b/test/conftest.py
new file mode 100644
index 000000000..1095fba03
--- /dev/null
+++ b/test/conftest.py
@@ -0,0 +1,92 @@
+import re
+from typing import Optional
+import pytest
+
+
+def pytest_addoption(parser: pytest.Parser):
+    group = parser.getgroup("coreblocks")
+    group.addoption("--coreblocks-regression", action="store_true", help="Run also regression tests.")
+    group.addoption(
+        "--coreblocks-backend",
+        default="cocotb",
+        choices=["cocotb", "pysim"],
+        help="Simulation backend for regression tests",
+    )
+    group.addoption("--coreblocks-traces", action="store_true", help="Generate traces from regression tests")
+    group.addoption("--coreblocks-list", action="store_true", help="List all tests in flattened format.")
+    group.addoption(
+        "--coreblocks-test-name",
+        action="store",
+        type=str,
+        help="Name or regexp in flattened format matching the tests to run.",
+    )
+    group.addoption(
+        "--coreblocks-test-count",
+        action="store",
+        type=int,
+        help="Number of tests to start. If fewer than the number of selected tests, only that subset is run.",
+    )
+
+
+def generate_unittestname(item: pytest.Item) -> str:
+    full_name = ".".join(map(lambda s: s[:-3] if s[-3:] == ".py" else s, map(lambda x: x.name, item.listchain())))
+    return full_name
+
+
+def generate_test_cases_list(session: pytest.Session) -> list[str]:
+    tests_list = []
+    for item in session.items:
+        full_name = generate_unittestname(item)
+        tests_list.append(full_name)
+    return tests_list
+
+
+def pytest_collection_finish(session: pytest.Session):
+    if session.config.getoption("coreblocks_list"):
+        full_names = generate_test_cases_list(session)
+        for i in full_names:
+            print(i)
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtestloop(session: pytest.Session) -> Optional[bool]:
+    if session.config.getoption("coreblocks_list"):
+        return True
+    return None
+
+
+def deselect_based_on_flatten_name(items: list[pytest.Item], config: pytest.Config) -> None:
+    coreblocks_test_name = config.getoption("coreblocks_test_name")
+    if not isinstance(coreblocks_test_name, str):
+        return
+
+    deselected = []
+    remaining = []
+    regexp = re.compile(coreblocks_test_name)
+    for item in items:
+        full_name = generate_unittestname(item)
+        match = regexp.search(full_name)
+        if match is None:
+            deselected.append(item)
+        else:
+            remaining.append(item)
+    if deselected:
+        config.hook.pytest_deselected(items=deselected)
+        items[:] = remaining
+
+
+def deselect_based_on_count(items: list[pytest.Item], config: pytest.Config) -> None:
+    coreblocks_test_count = config.getoption("coreblocks_test_count")
+    if not isinstance(coreblocks_test_count, int):
+        return
+
+    deselected = items[coreblocks_test_count:]
+    remaining = items[:coreblocks_test_count]
+    if deselected:
+        config.hook.pytest_deselected(items=deselected)
+        items[:] = remaining
+
+
+def pytest_collection_modifyitems(items: list[pytest.Item], config: pytest.Config) -> None:
+    deselect_based_on_flatten_name(items, config)
+    deselect_based_on_count(items, config)
diff --git a/test/regression/cocotb/test_entrypoint.py b/test/regression/cocotb/test_entrypoint.py
index d5e8fb7a9..36879b2a6 100644
--- a/test/regression/cocotb/test_entrypoint.py
+++ b/test/regression/cocotb/test_entrypoint.py
@@ -6,12 +6,18 @@ sys.path.insert(0, str(top_dir))
 
 from test.regression.cocotb import CocotbSimulation, generate_tests  # noqa: E402
-from test.regression.test import run_test, get_all_test_names  # noqa: E402
+from test.regression.test_regression import run_test  # noqa: E402
+from test.regression.conftest import get_all_test_names  # noqa: E402
+
+# used to build the Verilator model without starting tests
+empty_testcase_name = "SKIP"
 
 
 async def do_test(dut, test_name):
     cocotb.logging.getLogger().setLevel(cocotb.logging.INFO)
+    if test_name == empty_testcase_name:
+        return
     await run_test(CocotbSimulation(dut), test_name)
 
 
-generate_tests(do_test, list(get_all_test_names()))
+generate_tests(do_test, list(get_all_test_names()) + [empty_testcase_name])
diff --git a/test/regression/conftest.py b/test/regression/conftest.py
new file mode 100644
index 000000000..54648bbb5
--- /dev/null
+++ b/test/regression/conftest.py
@@ -0,0 +1,45 @@
+from glob import glob
+from pathlib import Path
+import pytest
+import subprocess
+import sys
+
+test_dir = Path(__file__).parent.parent
+riscv_tests_dir = test_dir.joinpath("external/riscv-tests")
+
+
+def get_all_test_names():
+    return sorted([name[5:] for name in glob("test-*", root_dir=riscv_tests_dir)])
+
+
+def load_regression_tests() -> list[str]:
+    all_tests = set(get_all_test_names())
+    if len(all_tests) == 0:
+        res = subprocess.run(["make", "-C", "test/external/riscv-tests"])
+        if res.returncode != 0:
+            print("Couldn't build regression tests")
+            sys.exit(1)
+        all_tests = set(get_all_test_names())
+
+    exclude = {"rv32ui-ma_data", "rv32ui-fence_i"}
+
+    return sorted(list(all_tests - exclude))
+
+
+def pytest_generate_tests(metafunc: pytest.Metafunc):
+    if not metafunc.config.getoption("coreblocks_regression"):
+        # Parametrize with an empty list so that regression tests are reported as skipped
+        metafunc.parametrize(["test_name", "backend", "traces", "verbose"], [])
+        return
+
+    all_tests = (
+        load_regression_tests()
+    )  # The list always has to be in the same order (e.g. sorted) to allow for parallel testing
+    traces = metafunc.config.getoption("coreblocks_traces")
+    backend = metafunc.config.getoption("coreblocks_backend")
+    verbose = bool(metafunc.config.getoption("verbose"))
+    if {"test_name", "backend", "traces", "verbose"}.issubset(metafunc.fixturenames):
+        metafunc.parametrize(
+            ["test_name", "backend", "traces", "verbose"],
+            [(test_name, backend, traces, verbose) for test_name in all_tests],
+        )
diff --git a/test/regression/test.py b/test/regression/test.py
deleted file mode 100644
index daf0ff3b7..000000000
--- a/test/regression/test.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from glob import glob
-from pathlib import Path
-
-from .memory import *
-from .common import SimulationBackend
-
-test_dir = Path(__file__).parent.parent
-riscv_tests_dir = test_dir.joinpath("external/riscv-tests")
-
-# disable write protection for specific tests with writes to .text section
-exclude_write_protection = ["rv32uc-rvc"]
-
-
-class MMIO(MemorySegment):
-    def __init__(self, on_finish: Callable[[], None]):
-        super().__init__(range(0x80000000, 0x80000000 + 4), SegmentFlags.READ | SegmentFlags.WRITE)
-        self.on_finish = on_finish
-        self.failed_test = 0
-
-    def read(self, req: ReadRequest) -> ReadReply:
-        return ReadReply()
-
-    def write(self, req: WriteRequest) -> WriteReply:
-        self.failed_test = req.data
-        self.on_finish()
-        return WriteReply()
-
-
-def get_all_test_names():
-    return {name[5:] for name in glob("test-*", root_dir=riscv_tests_dir)}
-
-
-async def run_test(sim_backend: SimulationBackend, test_name: str):
-    mmio = MMIO(lambda: sim_backend.stop())
-
-    mem_segments: list[MemorySegment] = []
-    mem_segments += load_segments_from_elf(
-        str(riscv_tests_dir.joinpath("test-" + test_name)),
-        disable_write_protection=test_name in exclude_write_protection,
-    )
-    mem_segments.append(mmio)
-
-    mem_model = CoreMemoryModel(mem_segments)
-
-    result = await sim_backend.run(mem_model, timeout_cycles=5000)
-
-    if not result.success:
-        raise RuntimeError("Simulation timed out")
-
-    if mmio.failed_test:
-        raise RuntimeError("Failing test: %d" % mmio.failed_test)
diff --git a/test/regression/test_regression.py b/test/regression/test_regression.py
new file mode 100644
index 000000000..220b6b2d5
--- /dev/null
+++ b/test/regression/test_regression.py
@@ -0,0 +1,127 @@
+from .memory import *
+from .common import SimulationBackend
+from .conftest import riscv_tests_dir
+from test.regression.pysim import PySimulation
+import xml.etree.ElementTree as eT
+import asyncio
+from typing import Literal
+import os
+import pytest
+import subprocess
+import json
+import tempfile
+from filelock import FileLock
+
+REGRESSION_TESTS_PREFIX = "test.regression."
+
+
+# disable write protection for specific tests with writes to .text section
+exclude_write_protection = ["rv32uc-rvc"]
+
+
+class MMIO(MemorySegment):
+    def __init__(self, on_finish: Callable[[], None]):
+        super().__init__(range(0x80000000, 0x80000000 + 4), SegmentFlags.READ | SegmentFlags.WRITE)
+        self.on_finish = on_finish
+        self.failed_test = 0
+
+    def read(self, req: ReadRequest) -> ReadReply:
+        return ReadReply()
+
+    def write(self, req: WriteRequest) -> WriteReply:
+        self.failed_test = req.data
+        self.on_finish()
+        return WriteReply()
+
+
+async def run_test(sim_backend: SimulationBackend, test_name: str):
+    mmio = MMIO(lambda: sim_backend.stop())
+
+    mem_segments: list[MemorySegment] = []
+    mem_segments += load_segments_from_elf(
+        str(riscv_tests_dir.joinpath("test-" + test_name)),
+        disable_write_protection=test_name in exclude_write_protection,
+    )
+    mem_segments.append(mmio)
+
+    mem_model = CoreMemoryModel(mem_segments)
+
+    result = await sim_backend.run(mem_model, timeout_cycles=5000)
+
+    if not result.success:
+        raise RuntimeError("Simulation timed out")
+
+    if mmio.failed_test:
+        raise RuntimeError("Failing test: %d" % mmio.failed_test)
+
+
+def regression_body_with_cocotb(test_name: str, traces: bool):
+    arglist = ["make", "-C", "test/regression/cocotb", "-f", "test.Makefile"]
+    arglist += [f"TESTCASE={test_name}"]
+
+    verilog_code = os.path.join(os.getcwd(), "core.v")
+    gen_info_path = f"{verilog_code}.json"
+    arglist += [f"_COREBLOCKS_GEN_INFO={gen_info_path}"]
+    arglist += [f"VERILOG_SOURCES={verilog_code}"]
+    tmp_result_file = tempfile.NamedTemporaryFile("r")
+    arglist += [f"COCOTB_RESULTS_FILE={tmp_result_file.name}"]
+
+    if traces:
+        arglist += ["TRACES=1"]
+
+    res = subprocess.run(arglist)
+
+    assert res.returncode == 0
+
+    tree = eT.parse(tmp_result_file.name)
+    assert len(list(tree.iter("failure"))) == 0
+
+
+def regression_body_with_pysim(test_name: str, traces: bool, verbose: bool):
+    traces_file = None
+    if traces:
+        traces_file = REGRESSION_TESTS_PREFIX + test_name
+    asyncio.run(run_test(PySimulation(verbose, traces_file=traces_file), test_name))
+
+
+@pytest.fixture(scope="session")
+def verilate_model(worker_id, request: pytest.FixtureRequest):
+    """
+    Fixture to prevent races on verilating the coreblocks model. It is only run in
+    distributed cocotb mode. It executes a 'SKIP' regression test which verilates the model.
+    """
+    if request.session.config.getoption("coreblocks_backend") != "cocotb" or worker_id == "master":
+        return
+
+    lock_path = "_coreblocks_regression.lock"
+    counter_path = "_coreblocks_regression.counter"
+    with FileLock(lock_path):
+        regression_body_with_cocotb("SKIP", False)
+        if os.path.exists(counter_path):
+            with open(counter_path, "r") as counter_file:
+                c = json.load(counter_file)
+        else:
+            c = 0
+        with open(counter_path, "w") as counter_file:
+            json.dump(c + 1, counter_file)
+    yield
+    # Session teardown
+    deferred_remove = False
+    with FileLock(lock_path):
+        with open(counter_path, "r") as counter_file:
+            c = json.load(counter_file)
+        if c == 1:
+            deferred_remove = True
+        else:
+            with open(counter_path, "w") as counter_file:
+                json.dump(c - 1, counter_file)
+    if deferred_remove:
+        os.remove(lock_path)
+        os.remove(counter_path)
+
+
+def test_entrypoint(test_name: str, backend: Literal["pysim", "cocotb"], traces: bool, verbose: bool, verilate_model):
+    if backend == "cocotb":
+        regression_body_with_cocotb(test_name, traces)
+    elif backend == "pysim":
+        regression_body_with_pysim(test_name, traces, verbose)
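
For reference, a sketch of typical invocations of the reworked runner, using only flags visible in the diff above (run from the repository root):

    ./scripts/run_tests.py -j 8                # unit tests only, on 8 xdist workers
    ./scripts/run_tests.py -a -b pysim rv32ui  # include regression tests, pysim backend, names matching "rv32ui"
    ./scripts/run_tests.py -a -c 10            # run only the first 10 selected tests
    pytest -n auto --coreblocks-regression    # direct pytest invocation with regression tests enabled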