mshr: add trait
romnn committed Aug 20, 2023
1 parent dc6fcbc commit 6100cb3
Showing 4 changed files with 128 additions and 79 deletions.
src/cache/base.rs (12 changes: 8 additions & 4 deletions)
@@ -1,5 +1,9 @@
-use crate::mem_sub_partition::SECTOR_SIZE;
-use crate::{address, config, interconn as ic, mem_fetch, mshr, tag_array, Cycle};
+use crate::{
+    address, config, interconn as ic, mem_fetch,
+    mem_sub_partition::SECTOR_SIZE,
+    mshr::{self, MSHR},
+    tag_array, Cycle,
+};
 use console::style;
 use std::collections::{HashMap, VecDeque};
 use std::sync::{Arc, Mutex};
@@ -36,7 +40,7 @@ pub struct Base<I> {

     pub miss_queue: VecDeque<mem_fetch::MemFetch>,
     pub miss_queue_status: mem_fetch::Status,
-    pub mshrs: mshr::Table,
+    pub mshrs: mshr::Table<mem_fetch::MemFetch>,
     pub tag_array: tag_array::TagArray<()>,

     pending: HashMap<mem_fetch::MemFetch, PendingRequest>,
@@ -165,7 +169,7 @@ impl<I> Base<I> {
         let mut evicted = None;

         let mshr_addr = self.cache_config.mshr_addr(fetch.addr());
-        let mshr_hit = self.mshrs.probe(mshr_addr);
+        let mshr_hit = self.mshrs.get(mshr_addr).is_some();
         let mshr_full = self.mshrs.full(mshr_addr);

         log::debug!(
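Only three of the four changed files render on this page; the fourth, which accounts for the remaining 101 additions and 51 deletions and is presumably src/mshr.rs where the new trait is defined, never loaded. Going by the call sites above, where mshr::Table becomes generic over the fetch type and probe(addr) gives way to get(addr).is_some() alongside the existing full(addr), the MSHR trait plausibly looks something like this sketch. The method signatures and the return type of get are assumptions, not the commit's actual definition:

    use std::collections::VecDeque;

    type Address = u64; // stand-in for this crate's `address` alias

    // Hypothetical reconstruction of the MSHR trait, generic over the fetch
    // type `F` so that `Table<mem_fetch::MemFetch>` can implement it.
    pub trait MSHR<F> {
        // The requests currently merged under `block_addr`, if any.
        // Hit detection at the call sites is `get(addr).is_some()`.
        fn get(&self, block_addr: Address) -> Option<&VecDeque<F>>;

        // True when no further request can be merged under `block_addr`.
        fn full(&self, block_addr: Address) -> bool;
    }

Folding the old boolean probe into get(addr).is_some() keeps the trait surface small: hit detection and lookup of the merged requests go through a single method.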
src/cache/data.rs (39 changes: 19 additions & 20 deletions)
@@ -1,5 +1,4 @@
-use super::{base, event};
-use crate::{address, cache, config, interconn as ic, mem_fetch, tag_array, Cycle};
+use crate::{address, cache, config, interconn as ic, mem_fetch, mshr::MSHR, tag_array, Cycle};
 use std::collections::VecDeque;
 use std::sync::{Arc, Mutex};

@@ -10,7 +9,7 @@ use std::sync::{Arc, Mutex};
 /// (the policy used in fermi according to the CUDA manual)
 #[derive(Debug)]
 pub struct Data<I> {
-    pub inner: base::Base<I>,
+    pub inner: cache::base::Base<I>,
     /// Specifies type of write allocate request (e.g., L1 or L2)
     write_alloc_type: mem_fetch::AccessKind,
     /// Specifies type of writeback request (e.g., L1 or L2)
@@ -62,7 +61,7 @@ where
         cache_index: Option<usize>,
         fetch: &mem_fetch::MemFetch,
         time: u64,
-        _events: &mut [event::Event],
+        _events: &mut [cache::Event],
         _probe_status: cache::RequestStatus,
     ) -> cache::RequestStatus {
         debug_assert_eq!(addr, fetch.addr());
@@ -120,7 +119,7 @@ where
         // cache_index: usize,
         fetch: &mem_fetch::MemFetch,
         time: u64,
-        _events: &mut [event::Event],
+        _events: &mut [cache::event::Event],
         _probe_status: cache::RequestStatus,
     ) -> cache::RequestStatus {
         let super::base::Base {
@@ -151,9 +150,9 @@ where
     pub fn send_write_request(
         &mut self,
         mut fetch: mem_fetch::MemFetch,
-        request: event::Event,
+        request: cache::Event,
         time: u64,
-        events: &mut Vec<event::Event>,
+        events: &mut Vec<cache::Event>,
     ) {
         log::debug!("data_cache::send_write_request({})", fetch);
         events.push(request);
@@ -171,7 +170,7 @@ where
         cache_index: Option<usize>,
         fetch: &mem_fetch::MemFetch,
         time: u64,
-        events: &mut Vec<event::Event>,
+        events: &mut Vec<cache::Event>,
         _probe_status: cache::RequestStatus,
     ) -> cache::RequestStatus {
         if !self.inner.miss_queue_can_fit(1) {
@@ -244,7 +243,7 @@ where
             // address from the original mf
             writeback_fetch.tlx_addr.chip = fetch.tlx_addr.chip;
             writeback_fetch.tlx_addr.sub_partition = fetch.tlx_addr.sub_partition;
-            let event = event::Event::WriteBackRequestSent {
+            let event = cache::Event::WriteBackRequestSent {
                 evicted_block: None,
             };

@@ -269,7 +268,7 @@ where
         _cache_index: Option<usize>,
         fetch: mem_fetch::MemFetch,
         time: u64,
-        events: &mut Vec<event::Event>,
+        events: &mut Vec<cache::Event>,
         _probe_status: cache::RequestStatus,
     ) -> cache::RequestStatus {
         debug_assert_eq!(addr, fetch.addr());
@@ -291,7 +290,7 @@ where
         }

         // on miss, generate write through
-        let event = event::Event::WriteRequestSent;
+        let event = cache::Event::WriteRequestSent;
         self.send_write_request(fetch, event, time, events);
         cache::RequestStatus::MISS
     }
@@ -303,7 +302,7 @@ where
         cache_index: Option<usize>,
         fetch: mem_fetch::MemFetch,
         time: u64,
-        events: &mut Vec<event::Event>,
+        events: &mut Vec<cache::Event>,
         probe_status: cache::RequestStatus,
     ) -> cache::RequestStatus {
         // what exactly is the difference between the addr and the fetch addr?
Expand All @@ -316,7 +315,7 @@ where
// (write miss, read request, write back request)
//
// Conservatively ensure the worst-case request can be handled this cycle
let mshr_hit = self.inner.mshrs.probe(mshr_addr);
let mshr_hit = self.inner.mshrs.get(mshr_addr).is_some();
let mshr_free = !self.inner.mshrs.full(mshr_addr);
let mshr_miss_but_free = !mshr_hit && mshr_free && !self.inner.miss_queue_full();

@@ -344,7 +343,7 @@ where
             return cache::RequestStatus::RESERVATION_FAIL;
         }

-        let event = event::Event::WriteRequestSent;
+        let event = cache::Event::WriteRequestSent;
         self.send_write_request(fetch.clone(), event, time, events);

         let is_write = false;
@@ -383,7 +382,7 @@ where
             is_write_allocate,
         );

-        events.push(event::Event::WriteAllocateSent);
+        events.push(cache::Event::WriteAllocateSent);

         if should_miss {
             // If evicted block is modified and not a write-through
Expand Down Expand Up @@ -428,7 +427,7 @@ where
// is used, so set the right chip address from the original mf
writeback_fetch.tlx_addr.chip = fetch.tlx_addr.chip;
writeback_fetch.tlx_addr.sub_partition = fetch.tlx_addr.sub_partition;
let event = event::Event::WriteBackRequestSent {
let event = cache::Event::WriteBackRequestSent {
evicted_block: Some(evicted),
};

@@ -447,7 +446,7 @@ where
         cache_index: Option<usize>,
         fetch: mem_fetch::MemFetch,
         time: u64,
-        events: &mut Vec<event::Event>,
+        events: &mut Vec<cache::Event>,
         probe_status: cache::RequestStatus,
     ) -> cache::RequestStatus {
         let func = match self.inner.cache_config.write_allocate_policy {
@@ -475,7 +474,7 @@ where
         cache_index: Option<usize>,
         fetch: &mem_fetch::MemFetch,
         time: u64,
-        events: &mut [event::Event],
+        events: &mut [cache::Event],
         probe_status: cache::RequestStatus,
     ) -> cache::RequestStatus {
         let func = match self.inner.cache_config.write_policy {
@@ -501,7 +500,7 @@ where
         addr: address,
         cache_index: Option<usize>,
         fetch: mem_fetch::MemFetch,
-        events: &mut Vec<event::Event>,
+        events: &mut Vec<cache::Event>,
         time: u64,
     ) -> cache::RequestStatus {
         // dbg!(cache_index, probe_status);
@@ -587,7 +586,7 @@ where
         &mut self,
         addr: address,
         fetch: mem_fetch::MemFetch,
-        events: &mut Vec<event::Event>,
+        events: &mut Vec<cache::Event>,
         time: u64,
     ) -> cache::RequestStatus {
         let super::base::Base {
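The blanket rename from event::Event to cache::Event throughout data.rs suggests that the cache module now re-exports its event type; the re-export itself lives outside the hunks shown here. A minimal sketch of what src/cache/mod.rs would need for these paths to resolve, inferred from the rename rather than confirmed by the diff:

    pub mod base;
    pub mod data;
    pub mod event;

    // Re-export so call sites can write `cache::Event` for short. The `event`
    // module itself stays public, since one signature above still spells out
    // the long form `cache::event::Event`.
    pub use event::Event;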
src/lib.rs (4 changes: 0 additions & 4 deletions)
@@ -430,10 +430,6 @@ where
         let start_total = Instant::now();
         // int clock_mask = next_clock_domain();

-        // fn is_send<T: Send>(_: T) {}
-        // fn is_sync<T: Sync>(_: T) {}
-        // fn is_par_iter<T: rayon::iter::ParallelIterator>(_: T) {}
-
         // shader core loading (pop from ICNT into core)
         let start = Instant::now();
         if false && self.parallel_simulation {
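The four deleted lines were commented-out leftovers of a common compile-time assertion idiom: instantiating a helper with a trait bound forces the compiler to verify that a value is Send or Sync, which matters when handing simulator state to rayon's parallel iterators. A minimal, self-contained sketch of the pattern as it would read if re-enabled (the shared value is a placeholder, not this crate's actual state):

    use std::sync::{Arc, Mutex};

    // Each call compiles only if the argument satisfies the bound, turning
    // Send/Sync requirements into build-time checks with no runtime cost.
    fn is_send<T: Send>(_: T) {}
    fn is_sync<T: Sync>(_: T) {}

    fn main() {
        let shared = Arc::new(Mutex::new(Vec::<u64>::new()));
        is_send(Arc::clone(&shared)); // fails to compile if not Send
        is_sync(shared); // fails to compile if not Sync
    }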
0 comments on commit 6100cb3
