Skip to content

Commit

Permalink
Add pytest - step 2 (#554)
Browse files Browse the repository at this point in the history
  • Loading branch information
lekcyjna123 authored Mar 6, 2024
1 parent 903ab2e commit 01d54f4
Show file tree
Hide file tree
Showing 11 changed files with 312 additions and 204 deletions.
3 changes: 0 additions & 3 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -220,9 +220,6 @@ jobs:
. venv/bin/activate
scripts/run_tests.py -a regression
- name: Check for test failure
run: ./scripts/check_test_results.py

unit-test:
name: Run unit tests
runs-on: ubuntu-latest
Expand Down
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,9 @@ venv.bak/
# Tests outputs
test/__traces__
test/__profiles__/*.json
pytestdebug.log
_coreblocks_regression.lock
_coreblocks_regression.counter

# cocotb build
/test/regression/cocotb/build
Expand Down
8 changes: 8 additions & 0 deletions pytest.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
[pytest]
minversion = 7.2.2
testpaths =
tests
norecursedirs = *.egg .* build dist venv __traces__ __pycache__
filterwarnings =
ignore:cannot collect test class 'TestbenchIO':pytest.PytestCollectionWarning
ignore:No files were found in testpaths:pytest.PytestConfigWarning:
6 changes: 4 additions & 2 deletions requirements-dev.txt
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,10 @@ pyright==1.1.332
Sphinx==5.1.1
sphinx-rtd-theme==1.0.0
sphinxcontrib-mermaid==0.8.1
cocotb==1.7.2
cocotb==1.8.1
cocotb-bus==0.2.1
pytest==7.2.2
pytest==8.0.0
pytest-xdist==3.5.0
pyelftools==0.29
tabulate==0.9.0
filelock==3.13.1
22 changes: 0 additions & 22 deletions scripts/check_test_results.py

This file was deleted.

149 changes: 25 additions & 124 deletions scripts/run_tests.py
Original file line number Diff line number Diff line change
@@ -1,109 +1,15 @@
#!/usr/bin/env python3

import unittest
import asyncio
import pytest
import argparse
import re
import sys
import os
import subprocess
from typing import Literal
from pathlib import Path

topdir = Path(__file__).parent.parent
sys.path.insert(0, str(topdir))

import test.regression.test # noqa: E402
from test.regression.pysim import PySimulation # noqa: E402

REGRESSION_TESTS_PREFIX = "test.regression."


def cd_to_topdir():
    """Change the current working directory to the repository root."""
    root = str(topdir)
    os.chdir(root)


def load_unit_tests():
    """Discover all unittest cases under the current directory.

    Returns a dict mapping each test's id string to its test-case object.
    """
    tests = {}

    def collect(node):
        # TestSuite objects are iterable; leaves are individual test cases.
        if hasattr(node, "__iter__"):
            for child in node:
                collect(child)
        else:
            tests[node.id()] = node

    collect(unittest.TestLoader().discover("."))
    return tests


def load_regression_tests() -> list[str]:
    """Collect the names of the available regression tests.

    Builds the external riscv-tests first when none are found, then
    returns the test names minus the excluded ones.  Exits the process
    when the build fails.
    """
    all_tests = test.regression.test.get_all_test_names()
    if len(all_tests) == 0:
        res = subprocess.run(["make", "-C", "test/external/riscv-tests"])
        if res.returncode != 0:
            print("Couldn't build regression tests")
            sys.exit(1)
        # Re-scan after building: the first lookup ran before the test
        # binaries existed, so without this the function would return an
        # empty list on a fresh checkout even though the build succeeded.
        all_tests = test.regression.test.get_all_test_names()

    # NOTE(review): exclusion reason assumed from the names (misaligned
    # data access / fence.i presumably unsupported) -- confirm.
    exclude = {"rv32ui-ma_data", "rv32ui-fence_i"}

    return list(all_tests - exclude)


def run_regressions_with_cocotb(tests: list[str], traces: bool) -> bool:
    """Run the given regression tests through the cocotb make flow.

    Returns True when the make invocation exits successfully.
    """
    verilog_code = topdir.joinpath("core.v")
    gen_info_path = f"{verilog_code}.json"

    # Parallelize over every CPU this process may run on.
    jobs = len(os.sched_getaffinity(0))

    command = [
        "make",
        "-C",
        "test/regression/cocotb",
        "-f",
        "test.Makefile",
        f"-j{jobs}",
        "TESTCASE=" + ",".join(tests),
        f"VERILOG_SOURCES={verilog_code}",
        f"_COREBLOCKS_GEN_INFO={gen_info_path}",
    ]
    if traces:
        command.append("TRACES=1")

    return subprocess.run(command).returncode == 0


def run_regressions_with_pysim(tests: list[str], traces: bool, verbose: bool) -> bool:
    """Run regression tests on the Python simulation backend.

    Each test name is wrapped in a unittest FunctionTestCase and run by a
    TextTestRunner.  Returns True when every test passed.
    """
    suite = unittest.TestSuite()

    def _gen_test(test_name: str):
        # Build a zero-argument test function bound to this test_name.
        def test_fn():
            traces_file = None
            if traces:
                traces_file = REGRESSION_TESTS_PREFIX + test_name
            asyncio.run(test.regression.test.run_test(PySimulation(verbose, traces_file=traces_file), test_name))

        # Give the generated function a readable identity in test reports.
        test_fn.__name__ = test_name
        test_fn.__qualname__ = test_name

        return test_fn

    for test_name in tests:
        suite.addTest(unittest.FunctionTestCase(_gen_test(test_name)))

    runner = unittest.TextTestRunner(verbosity=(2 if verbose else 1))
    result = runner.run(suite)

    return result.wasSuccessful()


def run_regression_tests(tests: list[str], backend: Literal["pysim", "cocotb"], traces: bool, verbose: bool) -> bool:
    """Dispatch the regression tests to the selected simulation backend.

    Returns the backend's success flag, or False for an unknown backend.
    """
    if backend == "pysim":
        return run_regressions_with_pysim(tests, traces, verbose)
    if backend == "cocotb":
        return run_regressions_with_cocotb(tests, traces)
    return False
os.chdir(topdir)


def main():
Expand All @@ -117,46 +23,41 @@ def main():
"-b", "--backend", default="cocotb", choices=["cocotb", "pysim"], help="Simulation backend for regression tests"
)
parser.add_argument("-c", "--count", type=int, help="Start `c` first tests which match regexp")
parser.add_argument(
"-j", "--jobs", type=int, default=len(os.sched_getaffinity(0)), help="Start `j` jobs in parallel. Default: all"
)
parser.add_argument("test_name", nargs="?")

args = parser.parse_args()

unit_tests = load_unit_tests()
regression_tests = load_regression_tests() if args.all else []

if args.list:
for name in list(unit_tests.keys()):
print(name)
for name in regression_tests:
print(REGRESSION_TESTS_PREFIX + name)
return
pytest_arguments = ["--max-worker-restart=1"]

if args.trace:
os.environ["__COREBLOCKS_DUMP_TRACES"] = "1"
pytest_arguments.append("--coreblocks-traces")

if args.profile:
os.environ["__TRANSACTRON_PROFILE"] = "1"

if args.test_name:
pattern = re.compile(args.test_name)
unit_tests = {name: test for name, test in unit_tests.items() if pattern.search(name)}
regression_tests = [test for test in regression_tests if pattern.search(REGRESSION_TESTS_PREFIX + test)]

if not unit_tests and not regression_tests:
print(f"Could not find test matching '{args.test_name}'")
sys.exit(1)

unit_tests_success = True
if unit_tests:
runner = unittest.TextTestRunner(verbosity=(2 if args.verbose else 1))
result = runner.run(unittest.TestSuite(list(unit_tests.values())[: args.count]))
unit_tests_success = result.wasSuccessful()

regression_tests_success = True
if regression_tests:
regression_tests_success = run_regression_tests(regression_tests, args.backend, args.trace, args.verbose)

sys.exit(not (unit_tests_success and regression_tests_success))
pytest_arguments += [f"--coreblocks-test-name={args.test_name}"]
if args.count:
pytest_arguments += ["--coreblocks-test-count", str(args.count)]
if args.list:
pytest_arguments.append("--coreblocks-list")
if args.jobs and not args.list:
# To list tests we cannot use xdist, because it doesn't support forwarding of stdout from workers.
pytest_arguments += ["-n", str(args.jobs)]
if args.all:
pytest_arguments.append("--coreblocks-regression")
if args.verbose:
pytest_arguments.append("--verbose")
if args.backend:
pytest_arguments += [f"--coreblocks-backend={args.backend}"]

ret = pytest.main(pytest_arguments, [])

exit(ret)


if __name__ == "__main__":
Expand Down
92 changes: 92 additions & 0 deletions test/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
import re
from typing import Optional
import pytest


def pytest_addoption(parser: pytest.Parser):
    """Register the coreblocks-specific command line options with pytest."""
    group = parser.getgroup("coreblocks")
    group.addoption("--coreblocks-regression", action="store_true", help="Run also regression tests.")
    group.addoption(
        "--coreblocks-backend",
        default="cocotb",
        choices=["cocotb", "pysim"],
        help="Simulation backend for regression tests",
    )
    group.addoption("--coreblocks-traces", action="store_true", help="Generate traces from regression tests")
    group.addoption("--coreblocks-list", action="store_true", help="List all tests in flatten format.")
    group.addoption(
        "--coreblocks-test-name",
        action="store",
        type=str,
        help="Name or regexp in flatten format matching the tests to run.",
    )
    group.addoption(
        "--coreblocks-test-count",
        action="store",
        type=int,
        help="Number of tests to start. If less than number of all selected tests, then starts only subset of them.",
    )


def generate_unittestname(item: pytest.Item) -> str:
    """Build the dotted "flatten" name of a collected test item.

    Joins the names of every node on the item's collection chain,
    stripping a trailing ".py" from file-level components.
    """
    parts = []
    for node in item.listchain():
        name = node.name
        if name.endswith(".py"):
            name = name[:-3]
        parts.append(name)
    return ".".join(parts)


def generate_test_cases_list(session: pytest.Session) -> list[str]:
    """Return the flatten names of every test item collected in *session*."""
    return [generate_unittestname(item) for item in session.items]


def pytest_collection_finish(session: pytest.Session):
    """After collection, print each test's flatten name when listing was requested."""
    if not session.config.getoption("coreblocks_list"):
        return
    for name in generate_test_cases_list(session):
        print(name)


@pytest.hookimpl(tryfirst=True)
def pytest_runtestloop(session: pytest.Session) -> Optional[bool]:
    """Skip running tests entirely when only a listing was requested.

    Returning True tells pytest the run loop was handled; returning None
    lets the default loop proceed.
    """
    listing_only = session.config.getoption("coreblocks_list")
    return True if listing_only else None


def deselect_based_on_flatten_name(items: list[pytest.Item], config: pytest.Config) -> None:
    """Keep only items whose flatten name matches --coreblocks-test-name.

    Filtered-out items are reported via the pytest_deselected hook and
    removed from *items* in place.  No-op when the option was not given.
    """
    pattern = config.getoption("coreblocks_test_name")
    if not isinstance(pattern, str):
        return

    regexp = re.compile(pattern)
    kept = []
    dropped = []
    for item in items:
        if regexp.search(generate_unittestname(item)):
            kept.append(item)
        else:
            dropped.append(item)

    if dropped:
        config.hook.pytest_deselected(items=dropped)
        items[:] = kept


def deselect_based_on_count(items: list[pytest.Item], config: pytest.Config) -> None:
    """Trim *items* in place to the first --coreblocks-test-count entries.

    Items past the limit are reported via the pytest_deselected hook.
    No-op when the option was not given.
    """
    limit = config.getoption("coreblocks_test_count")
    if not isinstance(limit, int):
        return

    overflow = items[limit:]
    if overflow:
        config.hook.pytest_deselected(items=overflow)
        items[:] = items[:limit]


def pytest_collection_modifyitems(items: list[pytest.Item], config: pytest.Config) -> None:
    """Apply the coreblocks item filters: first by name pattern, then by count."""
    deselect_based_on_flatten_name(items, config)
    deselect_based_on_count(items, config)
10 changes: 8 additions & 2 deletions test/regression/cocotb/test_entrypoint.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,18 @@
sys.path.insert(0, str(top_dir))

from test.regression.cocotb import CocotbSimulation, generate_tests # noqa: E402
from test.regression.test import run_test, get_all_test_names # noqa: E402
from test.regression.test_regression import run_test # noqa: E402
from test.regression.conftest import get_all_test_names # noqa: E402

# used to build the Verilator model without starting tests
empty_testcase_name = "SKIP"


async def do_test(dut, test_name):
    """Cocotb entry point: run a single regression test on the given DUT."""
    # Set cocotb's root logger to INFO level for the test run.
    cocotb.logging.getLogger().setLevel(cocotb.logging.INFO)
    # The placeholder test case exists only so the Verilator model gets
    # built; it performs no simulation.
    if test_name == empty_testcase_name:
        return
    await run_test(CocotbSimulation(dut), test_name)


generate_tests(do_test, list(get_all_test_names()))
generate_tests(do_test, list(get_all_test_names()) + [empty_testcase_name])
Loading

0 comments on commit 01d54f4

Please sign in to comment.