From d6dc0e1c769ed4da64625c8da9f77fe51fb3e377 Mon Sep 17 00:00:00 2001 From: Marek Materzok Date: Wed, 6 Nov 2024 13:44:20 +0100 Subject: [PATCH] Async instruction cache tests --- test/cache/test_icache.py | 462 +++++++++++++++++------------------ transactron/testing/sugar.py | 3 + 2 files changed, 222 insertions(+), 243 deletions(-) diff --git a/test/cache/test_icache.py b/test/cache/test_icache.py index a52d75f35..2d4e3679d 100644 --- a/test/cache/test_icache.py +++ b/test/cache/test_icache.py @@ -1,9 +1,10 @@ from collections import deque +from amaranth_types import AnySimulatorContext +from amaranth_types.types import TestbenchContext from parameterized import parameterized_class import random from amaranth import Elaboratable, Module -from amaranth.sim import Passive, Settle, Tick from amaranth.utils import exact_log2 from transactron.lib import AdapterTrans, Adapter @@ -15,8 +16,10 @@ from coreblocks.params.configurations import test_core_config from coreblocks.cache.refiller import SimpleCommonBusCacheRefiller -from transactron.testing import TestCaseWithSimulator, TestbenchIO, def_method_mock, RecordIntDictRet -from ..peripherals.test_wishbone import WishboneInterfaceWrapper +from transactron.testing import TestCaseWithSimulator, AsyncTestbenchIO, async_def_method_mock +from transactron.testing.functions import MethodData +from transactron.testing.sugar import MethodMock +from ..peripherals.test_wishbone import AsyncWishboneInterfaceWrapper class SimpleCommonBusCacheRefillerTestCircuit(Elaboratable): @@ -38,8 +41,8 @@ def elaborate(self, platform): self.gen_params.get(ICacheLayouts), self.cp, self.bus_master_adapter ) - self.start_refill = TestbenchIO(AdapterTrans(self.refiller.start_refill)) - self.accept_refill = TestbenchIO(AdapterTrans(self.refiller.accept_refill)) + self.start_refill = AsyncTestbenchIO(AdapterTrans(self.refiller.start_refill)) + self.accept_refill = AsyncTestbenchIO(AdapterTrans(self.refiller.accept_refill)) m.submodules.wb_master = self.wb_master m.submodules.bus_master_adapter = self.bus_master_adapter @@ -47,7 +50,7 @@ def elaborate(self, platform): m.submodules.start_refill = self.start_refill m.submodules.accept_refill = self.accept_refill - self.wb_ctrl = WishboneInterfaceWrapper(self.wb_master.wb_master) + self.wb_ctrl = AsyncWishboneInterfaceWrapper(self.wb_master.wb_master) return m @@ -98,35 +101,29 @@ def setup_method(self) -> None: self.bad_addresses.add(bad_addr) self.bad_fetch_blocks.add(bad_addr & ~(self.cp.fetch_block_bytes - 1)) - def wishbone_slave(self): - yield Passive() - + async def wishbone_slave(self, sim: TestbenchContext): while True: - yield from self.test_module.wb_ctrl.slave_wait() + adr, *_ = await self.test_module.wb_ctrl.slave_wait(sim) # Wishbone is addressing words, so we need to shift it a bit to get the real address. 
- addr = (yield self.test_module.wb_ctrl.wb.adr) << exact_log2(self.cp.word_width_bytes) + addr = adr << exact_log2(self.cp.word_width_bytes) - yield Tick() - while random.random() < 0.5: - yield Tick() + await self.async_random_wait_geom(sim, 0.5) err = 1 if addr in self.bad_addresses else 0 data = random.randrange(2**self.gen_params.isa.xlen) self.mem[addr] = data - yield from self.test_module.wb_ctrl.slave_respond(data, err=err) - - yield Settle() + await self.test_module.wb_ctrl.slave_respond(sim, data, err=err) - def refiller_process(self): + async def refiller_process(self, sim: AnySimulatorContext): while self.requests: req_addr = self.requests.pop() - yield from self.test_module.start_refill.call(addr=req_addr) + await self.test_module.start_refill.call(sim, addr=req_addr) for i in range(self.cp.fetch_blocks_in_line): - ret = yield from self.test_module.accept_refill.call() + ret = await self.test_module.accept_refill.call(sim) cur_addr = req_addr + i * self.cp.fetch_block_bytes @@ -149,8 +146,8 @@ def refiller_process(self): def test(self): with self.run_simulation(self.test_module) as sim: - sim.add_process(self.wishbone_slave) - sim.add_process(self.refiller_process) + sim.add_testbench(self.wishbone_slave, background=True) + sim.add_testbench(self.refiller_process) class ICacheBypassTestCircuit(Elaboratable): @@ -171,10 +168,10 @@ def elaborate(self, platform): m.submodules.bypass = self.bypass = ICacheBypass( self.gen_params.get(ICacheLayouts), self.cp, self.bus_master_adapter ) - m.submodules.issue_req = self.issue_req = TestbenchIO(AdapterTrans(self.bypass.issue_req)) - m.submodules.accept_res = self.accept_res = TestbenchIO(AdapterTrans(self.bypass.accept_res)) + m.submodules.issue_req = self.issue_req = AsyncTestbenchIO(AdapterTrans(self.bypass.issue_req)) + m.submodules.accept_res = self.accept_res = AsyncTestbenchIO(AdapterTrans(self.bypass.accept_res)) - self.wb_ctrl = WishboneInterfaceWrapper(self.wb_master.wb_master) + self.wb_ctrl = AsyncWishboneInterfaceWrapper(self.wb_master.wb_master) return m @@ -220,17 +217,14 @@ def load_or_gen_mem(self, addr: int): self.mem[addr] = random.randrange(2**self.gen_params.isa.ilen) return self.mem[addr] - def wishbone_slave(self): - yield Passive() - + async def wishbone_slave(self, sim: TestbenchContext): while True: - yield from self.m.wb_ctrl.slave_wait() + adr, *_ = await self.m.wb_ctrl.slave_wait(sim) # Wishbone is addressing words, so we need to shift it a bit to get the real address. 
- addr = (yield self.m.wb_ctrl.wb.adr) << exact_log2(self.cp.word_width_bytes) + addr = adr << exact_log2(self.cp.word_width_bytes) - while random.random() < 0.5: - yield Tick() + await self.async_random_wait_geom(sim, 0.5) err = 1 if addr in self.bad_addrs else 0 @@ -238,19 +232,16 @@ def wishbone_slave(self): if self.gen_params.isa.xlen == 64: data = self.load_or_gen_mem(addr + 4) << 32 | data - yield from self.m.wb_ctrl.slave_respond(data, err=err) + await self.m.wb_ctrl.slave_respond(sim, data, err=err) - yield Settle() - - def user_process(self): + async def user_process(self, sim: TestbenchContext): while self.requests: req_addr = self.requests.popleft() & ~(self.cp.fetch_block_bytes - 1) - yield from self.m.issue_req.call(addr=req_addr) + await self.m.issue_req.call(sim, addr=req_addr) - while random.random() < 0.5: - yield Tick() + await self.async_random_wait_geom(sim, 0.5) - ret = yield from self.m.accept_res.call() + ret = await self.m.accept_res.call(sim) if (req_addr & ~(self.cp.word_width_bytes - 1)) in self.bad_addrs: assert ret["error"] @@ -262,21 +253,20 @@ def user_process(self): data |= self.mem[req_addr + 4] << 32 assert ret["fetch_block"] == data - while random.random() < 0.5: - yield Tick() + await self.async_random_wait_geom(sim, 0.5) def test(self): with self.run_simulation(self.m) as sim: - sim.add_process(self.wishbone_slave) - sim.add_process(self.user_process) + sim.add_testbench(self.wishbone_slave, background=True) + sim.add_testbench(self.user_process) class MockedCacheRefiller(Elaboratable, CacheRefillerInterface): def __init__(self, gen_params: GenParams): layouts = gen_params.get(ICacheLayouts) - self.start_refill_mock = TestbenchIO(Adapter(i=layouts.start_refill)) - self.accept_refill_mock = TestbenchIO(Adapter(o=layouts.accept_refill)) + self.start_refill_mock = AsyncTestbenchIO(Adapter(i=layouts.start_refill)) + self.accept_refill_mock = AsyncTestbenchIO(Adapter(o=layouts.accept_refill)) self.start_refill = self.start_refill_mock.adapter.iface self.accept_refill = self.accept_refill_mock.adapter.iface @@ -300,9 +290,9 @@ def elaborate(self, platform): m.submodules.refiller = self.refiller = MockedCacheRefiller(self.gen_params) m.submodules.cache = self.cache = ICache(self.gen_params.get(ICacheLayouts), self.cp, self.refiller) - m.submodules.issue_req = self.issue_req = TestbenchIO(AdapterTrans(self.cache.issue_req)) - m.submodules.accept_res = self.accept_res = TestbenchIO(AdapterTrans(self.cache.accept_res)) - m.submodules.flush_cache = self.flush_cache = TestbenchIO(AdapterTrans(self.cache.flush)) + m.submodules.issue_req = self.issue_req = AsyncTestbenchIO(AdapterTrans(self.cache.issue_req)) + m.submodules.accept_res = self.accept_res = AsyncTestbenchIO(AdapterTrans(self.cache.accept_res)) + m.submodules.flush_cache = self.flush_cache = AsyncTestbenchIO(AdapterTrans(self.cache.flush)) return m @@ -328,6 +318,7 @@ def setup_method(self) -> None: self.bad_addrs = set() self.bad_cache_lines = set() self.refill_requests = deque() + self.refill_block_cnt = 0 self.issued_requests = deque() self.accept_refill_request = True @@ -349,14 +340,21 @@ def init_module(self, ways, sets) -> None: self.cp = self.gen_params.icache_params self.m = ICacheTestCircuit(self.gen_params) - @def_method_mock(lambda self: self.m.refiller.start_refill_mock, enable=lambda self: self.accept_refill_request) + @async_def_method_mock( + lambda self: self.m.refiller.start_refill_mock, enable=lambda self: self.accept_refill_request + ) def start_refill_mock(self, addr): - 
self.refill_requests.append(addr) - self.refill_block_cnt = 0 - self.refill_in_fly = True - self.refill_addr = addr + @MethodMock.effect + def eff(): + self.refill_requests.append(addr) + self.refill_block_cnt = 0 + self.refill_in_fly = True + self.refill_addr = addr + + def enen(self): + return self.refill_in_fly - @def_method_mock(lambda self: self.m.refiller.accept_refill_mock, enable=lambda self: self.refill_in_fly) + @async_def_method_mock(lambda self: self.m.refiller.accept_refill_mock, enable=enen) def accept_refill_mock(self): addr = self.refill_addr + self.refill_block_cnt * self.cp.fetch_block_bytes @@ -367,12 +365,14 @@ def accept_refill_mock(self): if addr + i in self.bad_addrs: bad_addr = True - self.refill_block_cnt += 1 + last = self.refill_block_cnt + 1 == self.cp.fetch_blocks_in_line or bad_addr - last = self.refill_block_cnt == self.cp.fetch_blocks_in_line or bad_addr + @MethodMock.effect + def eff(): + self.refill_block_cnt += 1 - if last: - self.refill_in_fly = False + if last: + self.refill_in_fly = False return { "addr": addr, @@ -390,18 +390,19 @@ def add_bad_addr(self, addr: int): self.bad_addrs.add(addr) self.bad_cache_lines.add(addr & ~((1 << self.cp.offset_bits) - 1)) - def send_req(self, addr: int): + async def send_req(self, sim: AnySimulatorContext, addr: int): self.issued_requests.append(addr) - yield from self.m.issue_req.call(addr=addr) + await self.m.issue_req.call(sim, addr=addr) - def expect_resp(self, wait=False): - yield Settle() + async def expect_resp(self, sim: AnySimulatorContext, wait=False): if wait: - yield from self.m.accept_res.wait_until_done() + *_, resp = await self.m.accept_res.sample_outputs_until_done(sim) + else: + *_, resp = await self.m.accept_res.sample_outputs(sim) - self.assert_resp((yield from self.m.accept_res.get_outputs())) + self.assert_resp(resp) - def assert_resp(self, resp: RecordIntDictRet): + def assert_resp(self, resp: MethodData): addr = self.issued_requests.popleft() & ~(self.cp.fetch_block_bytes - 1) if (addr & ~((1 << self.cp.offset_bits) - 1)) in self.bad_cache_lines: @@ -417,343 +418,324 @@ def assert_resp(self, resp: RecordIntDictRet): def expect_refill(self, addr: int): assert self.refill_requests.popleft() == addr - def call_cache(self, addr: int): - yield from self.send_req(addr) - yield from self.m.accept_res.enable() - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.m.accept_res.disable() + async def call_cache(self, sim: AnySimulatorContext, addr: int): + await self.send_req(sim, addr) + self.m.accept_res.enable(sim) + await self.expect_resp(sim, wait=True) + self.m.accept_res.disable(sim) def test_1_way(self): self.init_module(1, 4) - def cache_user_process(): + async def cache_user_process(sim: TestbenchContext): # The first request should cause a cache miss - yield from self.call_cache(0x00010004) + await self.call_cache(sim, 0x00010004) self.expect_refill(0x00010000) # Accesses to the same cache line shouldn't cause a cache miss for i in range(self.cp.fetch_blocks_in_line): - yield from self.call_cache(0x00010000 + i * self.cp.fetch_block_bytes) + await self.call_cache(sim, 0x00010000 + i * self.cp.fetch_block_bytes) assert len(self.refill_requests) == 0 # Now go beyond the first cache line - yield from self.call_cache(0x00010000 + self.cp.line_size_bytes) + await self.call_cache(sim, 0x00010000 + self.cp.line_size_bytes) self.expect_refill(0x00010000 + self.cp.line_size_bytes) # Trigger cache aliasing - yield from self.call_cache(0x00020000) - yield from 
self.call_cache(0x00010000) + await self.call_cache(sim, 0x00020000) + await self.call_cache(sim, 0x00010000) self.expect_refill(0x00020000) self.expect_refill(0x00010000) # Fill the whole cache for i in range(0, self.cp.line_size_bytes * self.cp.num_of_sets, 4): - yield from self.call_cache(i) + await self.call_cache(sim, i) for i in range(self.cp.num_of_sets): self.expect_refill(i * self.cp.line_size_bytes) # Now do some accesses within the cached memory for i in range(50): - yield from self.call_cache(random.randrange(0, self.cp.line_size_bytes * self.cp.num_of_sets, 4)) + await self.call_cache(sim, random.randrange(0, self.cp.line_size_bytes * self.cp.num_of_sets, 4)) assert len(self.refill_requests) == 0 with self.run_simulation(self.m) as sim: - sim.add_process(cache_user_process) + sim.add_testbench(cache_user_process) def test_2_way(self): self.init_module(2, 4) - def cache_process(): + async def cache_process(sim: TestbenchContext): # Fill the first set of both ways - yield from self.call_cache(0x00010000) - yield from self.call_cache(0x00020000) + await self.call_cache(sim, 0x00010000) + await self.call_cache(sim, 0x00020000) self.expect_refill(0x00010000) self.expect_refill(0x00020000) # And now both lines should be in the cache - yield from self.call_cache(0x00010004) - yield from self.call_cache(0x00020004) + await self.call_cache(sim, 0x00010004) + await self.call_cache(sim, 0x00020004) assert len(self.refill_requests) == 0 with self.run_simulation(self.m) as sim: - sim.add_process(cache_process) + sim.add_testbench(cache_process) # Tests whether the cache is fully pipelined and the latency between requests and response is exactly one cycle. def test_pipeline(self): self.init_module(2, 4) - def cache_process(): + async def cache_process(sim: TestbenchContext): # Fill the cache for i in range(self.cp.num_of_sets): addr = 0x00010000 + i * self.cp.line_size_bytes - yield from self.call_cache(addr) + await self.call_cache(sim, addr) self.expect_refill(addr) - yield from self.tick(5) + await self.async_tick(sim, 4) # Create a stream of requests to ensure the pipeline is working - yield from self.m.accept_res.enable() + self.m.accept_res.enable(sim) for i in range(0, self.cp.num_of_sets * self.cp.line_size_bytes, 4): addr = 0x00010000 + i self.issued_requests.append(addr) # Send the request - yield from self.m.issue_req.call_init(addr=addr) - yield Settle() - assert (yield from self.m.issue_req.done()) + ret = await self.m.issue_req.call_try(sim, addr=addr) + assert ret is not None # After a cycle the response should be ready - yield Tick() - yield from self.expect_resp() - yield from self.m.issue_req.disable() + await self.expect_resp(sim) - yield Tick() - yield from self.m.accept_res.disable() + self.m.accept_res.disable(sim) - yield from self.tick(5) + await self.async_tick(sim, 4) # Check how the cache handles queuing the requests - yield from self.send_req(addr=0x00010000 + 3 * self.cp.line_size_bytes) - yield from self.send_req(addr=0x00010004) + await self.send_req(sim, addr=0x00010000 + 3 * self.cp.line_size_bytes) + await self.send_req(sim, addr=0x00010004) # Wait a few cycles. 
There are two requests queued - yield from self.tick(5) + await self.async_tick(sim, 4) - yield from self.m.accept_res.enable() - yield from self.expect_resp() - yield Tick() - yield from self.expect_resp() - yield from self.send_req(addr=0x0001000C) - yield from self.expect_resp() + self.m.accept_res.enable(sim) + await self.expect_resp( + sim, + ) + await self.expect_resp( + sim, + ) + await self.send_req(sim, addr=0x0001000C) + await self.expect_resp( + sim, + ) - yield Tick() - yield from self.m.accept_res.disable() + self.m.accept_res.disable(sim) - yield from self.tick(5) + await self.async_tick(sim, 4) # Schedule two requests, the first one causing a cache miss - yield from self.send_req(addr=0x00020000) - yield from self.send_req(addr=0x00010000 + self.cp.line_size_bytes) + await self.send_req(sim, addr=0x00020000) + await self.send_req(sim, addr=0x00010000 + self.cp.line_size_bytes) - yield from self.m.accept_res.enable() + self.m.accept_res.enable(sim) - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.expect_resp() - yield Tick() - yield from self.m.accept_res.disable() + await self.expect_resp(sim, wait=True) + await self.expect_resp( + sim, + ) + self.m.accept_res.disable(sim) - yield from self.tick(3) + await self.async_tick(sim, 2) # Schedule two requests, the second one causing a cache miss - yield from self.send_req(addr=0x00020004) - yield from self.send_req(addr=0x00030000 + self.cp.line_size_bytes) + await self.send_req(sim, addr=0x00020004) + await self.send_req(sim, addr=0x00030000 + self.cp.line_size_bytes) - yield from self.m.accept_res.enable() + self.m.accept_res.enable(sim) - yield from self.expect_resp() - yield Tick() - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.m.accept_res.disable() + await self.expect_resp( + sim, + ) + await self.expect_resp(sim, wait=True) + self.m.accept_res.disable(sim) - yield from self.tick(3) + await self.async_tick(sim, 2) # Schedule two requests, both causing a cache miss - yield from self.send_req(addr=0x00040000) - yield from self.send_req(addr=0x00050000 + self.cp.line_size_bytes) + await self.send_req(sim, addr=0x00040000) + await self.send_req(sim, addr=0x00050000 + self.cp.line_size_bytes) - yield from self.m.accept_res.enable() + self.m.accept_res.enable(sim) - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.m.accept_res.disable() + await self.expect_resp(sim, wait=True) + await self.expect_resp(sim, wait=True) + self.m.accept_res.disable(sim) with self.run_simulation(self.m) as sim: - sim.add_process(cache_process) + sim.add_testbench(cache_process) def test_flush(self): self.init_module(2, 4) - def cache_process(): + async def cache_process(sim: TestbenchContext): # Fill the whole cache for s in range(self.cp.num_of_sets): for w in range(self.cp.num_of_ways): addr = w * 0x00010000 + s * self.cp.line_size_bytes - yield from self.call_cache(addr) + await self.call_cache(sim, addr) self.expect_refill(addr) # Everything should be in the cache for s in range(self.cp.num_of_sets): for w in range(self.cp.num_of_ways): addr = w * 0x00010000 + s * self.cp.line_size_bytes - yield from self.call_cache(addr) + await self.call_cache(sim, addr) assert len(self.refill_requests) == 0 - yield from self.m.flush_cache.call() + await self.m.flush_cache.call(sim) # The cache should be empty for s in range(self.cp.num_of_sets): for w in range(self.cp.num_of_ways): addr = w * 0x00010000 + s * self.cp.line_size_bytes 
- yield from self.call_cache(addr) + await self.call_cache(sim, addr) self.expect_refill(addr) # Try to flush during refilling the line - yield from self.send_req(0x00030000) - yield from self.m.flush_cache.call() + await self.send_req(sim, 0x00030000) + await self.m.flush_cache.call(sim) # We still should be able to accept the response for the last request - self.assert_resp((yield from self.m.accept_res.call())) + self.assert_resp(await self.m.accept_res.call(sim)) self.expect_refill(0x00030000) - yield from self.call_cache(0x00010000) + await self.call_cache(sim, 0x00010000) self.expect_refill(0x00010000) - yield Tick() - # Try to execute issue_req and flush_cache methods at the same time - yield from self.m.issue_req.call_init(addr=0x00010000) + self.m.issue_req.call_init(sim, addr=0x00010000) self.issued_requests.append(0x00010000) - yield from self.m.flush_cache.call_init() - yield Settle() - assert not (yield from self.m.issue_req.done()) - assert (yield from self.m.flush_cache.done()) - yield Tick() - yield from self.m.flush_cache.call_do() - yield from self.m.issue_req.call_do() - self.assert_resp((yield from self.m.accept_res.call())) + self.m.flush_cache.call_init(sim) + *_, issue_req_done, flush_cache_done = await sim.tick().sample( + self.m.issue_req.done, self.m.flush_cache.done + ) + assert not issue_req_done + assert flush_cache_done + self.m.flush_cache.disable(sim) + await self.m.issue_req.call_do(sim) + self.assert_resp(await self.m.accept_res.call(sim)) self.expect_refill(0x00010000) - yield Tick() - # Schedule two requests and then flush - yield from self.send_req(0x00000000 + self.cp.line_size_bytes) - yield from self.send_req(0x00010000) + await self.send_req(sim, 0x00000000 + self.cp.line_size_bytes) + await self.send_req(sim, 0x00010000) - yield from self.m.flush_cache.call_init() - yield Tick() + res = await self.m.flush_cache.call_try(sim) # We cannot flush until there are two pending requests - assert not (yield from self.m.flush_cache.done()) - yield Tick() - yield from self.m.flush_cache.disable() - yield Tick() + assert res is None + res = await self.m.flush_cache.call_try(sim) + assert res is None # Accept the first response - self.assert_resp((yield from self.m.accept_res.call())) + self.assert_resp(await self.m.accept_res.call(sim)) - yield from self.m.flush_cache.call() + await self.m.flush_cache.call(sim) # And accept the second response ensuring that we got old data - self.assert_resp((yield from self.m.accept_res.call())) + self.assert_resp(await self.m.accept_res.call(sim)) self.expect_refill(0x00000000 + self.cp.line_size_bytes) # Just make sure that the line is truly flushed - yield from self.call_cache(0x00010000) + await self.call_cache(sim, 0x00010000) self.expect_refill(0x00010000) with self.run_simulation(self.m) as sim: - sim.add_process(cache_process) + sim.add_testbench(cache_process) def test_errors(self): self.init_module(1, 4) - def cache_process(): + async def cache_process(sim: TestbenchContext): self.add_bad_addr(0x00010000) # Bad addr at the beggining of the line self.add_bad_addr(0x00020008) # Bad addr in the middle of the line self.add_bad_addr( 0x00030000 + self.cp.line_size_bytes - self.cp.word_width_bytes ) # Bad addr at the end of the line - yield from self.call_cache(0x00010008) + await self.call_cache(sim, 0x00010008) self.expect_refill(0x00010000) # Requesting a bad addr again should retrigger refill - yield from self.call_cache(0x00010008) + await self.call_cache(sim, 0x00010008) self.expect_refill(0x00010000) - yield from 
self.call_cache(0x00020000) + await self.call_cache(sim, 0x00020000) self.expect_refill(0x00020000) - yield from self.call_cache(0x00030008) + await self.call_cache(sim, 0x00030008) self.expect_refill(0x00030000) # Test how pipelining works with errors - yield from self.m.accept_res.disable() - yield Tick() + self.m.accept_res.disable(sim) # Schedule two requests, the first one causing an error - yield from self.send_req(addr=0x00020000) - yield from self.send_req(addr=0x00011000) + await self.send_req(sim, addr=0x00020000) + await self.send_req(sim, addr=0x00011000) - yield from self.m.accept_res.enable() + self.m.accept_res.enable(sim) - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.m.accept_res.disable() + await self.expect_resp(sim, wait=True) + await self.expect_resp(sim, wait=True) + self.m.accept_res.disable(sim) - yield from self.tick(3) + await self.async_tick(sim, 3) # Schedule two requests, the second one causing an error - yield from self.send_req(addr=0x00021004) - yield from self.send_req(addr=0x00030000) + await self.send_req(sim, addr=0x00021004) + await self.send_req(sim, addr=0x00030000) - yield from self.tick(10) + await self.async_tick(sim, 10) - yield from self.m.accept_res.enable() + self.m.accept_res.enable(sim) - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.m.accept_res.disable() + await self.expect_resp(sim, wait=True) + await self.expect_resp(sim, wait=True) + self.m.accept_res.disable(sim) - yield from self.tick(3) + await self.async_tick(sim, 3) # Schedule two requests, both causing an error - yield from self.send_req(addr=0x00020000) - yield from self.send_req(addr=0x00010000) + await self.send_req(sim, addr=0x00020000) + await self.send_req(sim, addr=0x00010000) - yield from self.m.accept_res.enable() + self.m.accept_res.enable(sim) - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.expect_resp(wait=True) - yield Tick() - yield from self.m.accept_res.disable() - yield Tick() + await self.expect_resp(sim, wait=True) + await self.expect_resp(sim, wait=True) + self.m.accept_res.disable(sim) # The second request will cause an error - yield from self.send_req(addr=0x00021004) - yield from self.send_req(addr=0x00030000) + await self.send_req(sim, addr=0x00021004) + await self.send_req(sim, addr=0x00030000) - yield from self.tick(10) + await self.async_tick(sim, 10) # Accept the first response - yield from self.m.accept_res.enable() - yield from self.expect_resp(wait=True) - yield Tick() + self.m.accept_res.enable(sim) + await self.expect_resp(sim, wait=True) # Wait before accepting the second response - yield from self.m.accept_res.disable() - yield from self.tick(10) - yield from self.m.accept_res.enable() - yield from self.expect_resp(wait=True) - - yield Tick() + self.m.accept_res.disable(sim) + await self.async_tick(sim, 10) + self.m.accept_res.enable(sim) + await self.expect_resp(sim, wait=True) # This request should not cause an error - yield from self.send_req(addr=0x00011000) - yield from self.expect_resp(wait=True) + await self.send_req(sim, addr=0x00011000) + await self.expect_resp(sim, wait=True) with self.run_simulation(self.m) as sim: - sim.add_process(cache_process) + sim.add_testbench(cache_process) def test_random(self): self.init_module(4, 8) @@ -765,34 +747,28 @@ def test_random(self): if random.random() < 0.05: self.add_bad_addr(i) - def refiller_ctrl(): - yield 
Passive() - + async def refiller_ctrl(sim: TestbenchContext): while True: - yield from self.random_wait_geom(0.4) + await self.async_random_wait_geom(sim, 0.4) self.accept_refill_request = False - yield from self.random_wait_geom(0.7) + await self.async_random_wait_geom(sim, 0.7) self.accept_refill_request = True - def sender(): + async def sender(sim: TestbenchContext): for _ in range(iterations): - yield from self.send_req(random.randrange(0, max_addr, 4)) + await self.send_req(sim, random.randrange(0, max_addr, 4)) + await self.async_random_wait_geom(sim, 0.5) - while random.random() < 0.5: - yield Tick() - - def receiver(): + async def receiver(sim: TestbenchContext): for _ in range(iterations): while len(self.issued_requests) == 0: - yield Tick() - - self.assert_resp((yield from self.m.accept_res.call())) + await sim.tick() - while random.random() < 0.2: - yield Tick() + self.assert_resp(await self.m.accept_res.call(sim)) + await self.async_random_wait_geom(sim, 0.2) with self.run_simulation(self.m) as sim: - sim.add_process(sender) - sim.add_process(receiver) - sim.add_process(refiller_ctrl) + sim.add_testbench(sender) + sim.add_testbench(receiver) + sim.add_testbench(refiller_ctrl, background=True) diff --git a/transactron/testing/sugar.py b/transactron/testing/sugar.py index 5afaaf7ce..2622da66e 100644 --- a/transactron/testing/sugar.py +++ b/transactron/testing/sugar.py @@ -65,6 +65,9 @@ async def effect_process(self, sim: AnySimulatorContext) -> None: for eff in self._effects: eff() + # Ensure that the effects of all mocks are applied + await sim.delay(1e-12) + # Next, update combinational signals taking the new state into account. # In case the input signals get updated later, the other processes will perform the update again. self._effects = []
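
A minimal before/after sketch of the process-to-testbench conversion applied throughout this patch; `example_io` and the constant address are placeholders rather than names from the diff, everything else mirrors patterns visible above.

    # Old style: generator processes registered with add_process; Passive()
    # marked a process as non-terminating and Tick() advanced one cycle.
    def old_process(self):
        yield Passive()
        while True:
            yield from self.m.example_io.call(addr=0x1000)  # placeholder TestbenchIO
            while random.random() < 0.5:
                yield Tick()

    # New style: async coroutines take an explicit simulator context and are
    # registered with add_testbench; background=True replaces Passive(), and
    # async_random_wait_geom replaces the hand-rolled random Tick() loops.
    async def new_process(self, sim: TestbenchContext):
        while True:
            await self.m.example_io.call(sim, addr=0x1000)  # placeholder AsyncTestbenchIO
            await self.async_random_wait_geom(sim, 0.5)

    # with self.run_simulation(self.m) as sim:
    #     sim.add_testbench(self.new_process)
    #     sim.add_testbench(self.wishbone_slave, background=True)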
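The mocked-refiller changes all follow one rule: the mock body computes its return value from the current Python state, while state mutations are deferred into a @MethodMock.effect closure so they apply only in cycles where the mocked method actually fires. A condensed, hypothetical mock in that style (the ready_mock TestbenchIO and the count/running fields are placeholders, not names from the diff):

    @async_def_method_mock(lambda self: self.m.ready_mock, enable=lambda self: self.running)
    def ready_mock(self):
        result = {"count": self.count}  # computed from the pre-effect state

        @MethodMock.effect
        def eff():
            # Runs only if the mocked method was actually called this cycle.
            self.count += 1

        return result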
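The Wishbone slave loops now read the request fields from the tuple returned by slave_wait instead of peeking at the bus signals with yield. A simplified responder in that style, with the error injection and data generation of the real tests omitted:

    async def wishbone_slave(self, sim: TestbenchContext):
        while True:
            adr, *_ = await self.m.wb_ctrl.slave_wait(sim)
            # Wishbone addresses words, so shift to get the byte address.
            addr = adr << exact_log2(self.cp.word_width_bytes)

            await self.async_random_wait_geom(sim, 0.5)
            await self.m.wb_ctrl.slave_respond(sim, self.mem.get(addr, 0), err=0)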