diff --git a/Cargo.lock b/Cargo.lock index 5c99187a..1a58a374 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -319,7 +319,6 @@ name = "hvm64-runtime" version = "0.3.0" dependencies = [ "hvm64-util", - "nohash-hasher", "parking_lot", ] @@ -414,12 +413,6 @@ version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" -[[package]] -name = "nohash-hasher" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" - [[package]] name = "num-traits" version = "0.2.18" diff --git a/cspell.json b/cspell.json index e7549f74..cf312877 100644 --- a/cspell.json +++ b/cspell.json @@ -31,7 +31,6 @@ "monomorphized", "newtype", "nilary", - "nohash", "nomicon", "oper", "outdir", diff --git a/host/src/encode.rs b/host/src/encode.rs index a15bf8a8..b98b07d2 100644 --- a/host/src/encode.rs +++ b/host/src/encode.rs @@ -2,7 +2,7 @@ use crate::prelude::*; use crate::Host; use hvm64_ast::{Lab, Net as AstNet, Tree}; -use hvm64_runtime::{Instruction, InterpretedDef, Mode, Net, Port, Trg, TrgId}; +use hvm64_runtime::{Instruction, InterpretedDef, Net, Port, Trg, TrgId}; use hvm64_util::{maybe_grow, ops::TypedOp as Op}; impl Host { @@ -19,7 +19,7 @@ impl Host { /// Encode `tree` directly into `trg`, skipping the intermediate `Def` /// representation. - pub fn encode_tree(&self, net: &mut Net, trg: Trg, tree: &Tree) { + pub fn encode_tree(&self, net: &mut Net, trg: Trg, tree: &Tree) { let mut state = State { host: self, encoder: net, scope: Default::default() }; state.visit_tree(tree, trg); state.finish(); @@ -27,7 +27,7 @@ impl Host { /// Encode the root of `ast_net` directly into `trg` and encode its redexes /// into `net` redex list. - pub fn encode_net(&self, net: &mut Net, trg: Trg, ast_net: &AstNet) { + pub fn encode_net(&self, net: &mut Net, trg: Trg, ast_net: &AstNet) { let mut state = State { host: self, encoder: net, scope: Default::default() }; state.visit_net(ast_net, trg); state.finish(); @@ -193,7 +193,7 @@ impl Encoder for InterpretedDef { } } -impl<'a, M: Mode> Encoder for Net<'a, M> { +impl<'a> Encoder for Net<'a> { type Trg = Trg; fn link_const(&mut self, trg: Self::Trg, port: Port) { diff --git a/host/src/host.rs b/host/src/host.rs index b19d9ecd..1424917d 100644 --- a/host/src/host.rs +++ b/host/src/host.rs @@ -7,7 +7,7 @@ include!("../../prelude.rs"); use crate::prelude::*; use hvm64_ast::{Book, Tree}; -use hvm64_runtime::{Addr, Def, InterpretedDef, LabSet, Mode, Port, Tag, Wire}; +use hvm64_runtime::{Addr, Def, InterpretedDef, LabSet, Port, Tag, Wire}; use core::ops::{Deref, DerefMut}; diff --git a/host/src/readback.rs b/host/src/readback.rs index 62b49d79..eb9753a6 100644 --- a/host/src/readback.rs +++ b/host/src/readback.rs @@ -1,6 +1,6 @@ use crate::prelude::*; -use super::{Addr, Host, Mode, Port, Tag, Wire}; +use super::{Addr, Host, Port, Tag, Wire}; use core::ops::RangeFrom; @@ -19,7 +19,7 @@ impl Host { /// resulting ast net, as it is impossible to read these back from the runtime /// net representation. In the case of vicious circles, this may result in /// unbound variables. - pub fn readback(&self, rt_net: &hvm64_runtime::Net) -> Net { + pub fn readback(&self, rt_net: &hvm64_runtime::Net) -> Net { let mut state = ReadbackState { host: self, vars: Default::default(), var_id: 0 .. 
}; let mut net = Net::default(); diff --git a/host/src/stdlib.rs b/host/src/stdlib.rs index 7bec4d60..cb144887 100644 --- a/host/src/stdlib.rs +++ b/host/src/stdlib.rs @@ -10,13 +10,13 @@ use parking_lot::Mutex; use crate::{DefRef, Host}; use hvm64_ast::Tree; -use hvm64_runtime::{dispatch_dyn_net, AsDef, Def, DynNetMut, LabSet, Mode, Net, Port, Tag, Trg}; +use hvm64_runtime::{AsDef, Def, LabSet, Net, Port, Tag, Trg}; use hvm64_util::create_var; /// `@IDENTITY = (x x)` -pub const IDENTITY: *const Def = const { &Def::new(LabSet::from_bits(&[1]), (call_identity, call_identity)) }.upcast(); +pub const IDENTITY: *const Def = const { &Def::new(LabSet::from_bits(&[1]), call_identity) }.upcast(); -fn call_identity(net: &mut Net, port: Port) { +fn call_identity(net: &mut Net, port: Port) { let (a, b) = net.do_ctr(0, Trg::port(port)); net.link_trg(a, b); } @@ -41,7 +41,7 @@ impl LogDef { pub struct LogDef(Arc>, F); impl AsHostedDef for LogDef { - fn call(def: &Def, net: &mut Net, port: Port) { + fn call(def: &Def, net: &mut Net, port: Port) { let (arg, seq) = net.do_ctr(0, Trg::port(port)); let seq = net.wire_to_trg(seq); // SAFETY: the function inside `readback` won't live longer than @@ -50,9 +50,7 @@ impl AsHostedDef for LogDef { let def: &'static Def = unsafe { mem::transmute(def) }; readback(net, def.data.0.clone(), arg, |net, tree| { (def.data.1)(tree); - dispatch_dyn_net!(net => { - net.link_wire_port(seq, Port::new_ref(unsafe { &*IDENTITY })); - }); + net.link_wire_port(seq, Port::new_ref(unsafe { &*IDENTITY })); }); } } @@ -102,11 +100,11 @@ impl BoxDef { } pub trait AsBoxDef: Send + Sync + 'static { - fn call(slf: Box>, net: &mut Net, port: Port); + fn call(slf: Box>, net: &mut Net, port: Port); } impl AsDef for BoxDef { - unsafe fn call(slf: *const Def, net: &mut Net, port: Port) { + unsafe fn call(slf: *const Def, net: &mut Net, port: Port) { T::call(Box::from_raw(slf as *mut _), net, port) } } @@ -136,11 +134,11 @@ impl ArcDef { } pub trait AsArcDef: Send + Sync + 'static { - fn call(slf: Arc>, net: &mut Net, port: Port); + fn call(slf: Arc>, net: &mut Net, port: Port); } impl AsDef for ArcDef { - unsafe fn call(slf: *const Def, net: &mut Net, port: Port) { + unsafe fn call(slf: *const Def, net: &mut Net, port: Port) { T::call(Arc::from_raw(slf as *mut _), net, port); } } @@ -162,11 +160,11 @@ impl HostedDef { } pub trait AsHostedDef: Send + Sync + 'static { - fn call(slf: &Def, net: &mut Net, port: Port); + fn call(slf: &Def, net: &mut Net, port: Port); } impl AsDef for HostedDef { - unsafe fn call(slf: *const Def, net: &mut Net, port: Port) { + unsafe fn call(slf: *const Def, net: &mut Net, port: Port) { T::call((slf as *const Def).as_ref().unwrap(), net, port) } } @@ -182,15 +180,15 @@ impl UniqueTreePtr { } } -pub struct ReadbackDef { +pub struct ReadbackDef { root: Arc, host: Arc>, var_idx: Arc, tree: UniqueTreePtr, } -impl ReadbackDef { - fn maybe_finish(net: DynNetMut<'_, '_>, root: Arc) { +impl ReadbackDef { + fn maybe_finish(net: &mut Net, root: Arc) { let Some(root) = Arc::into_inner(root) else { return }; (root)(net) } @@ -204,8 +202,8 @@ impl ReadbackDef { } } -impl AsBoxDef for ReadbackDef { - fn call(def: Box>, net: &mut Net, port: Port) { +impl AsBoxDef for ReadbackDef { + fn call(def: Box>, net: &mut Net, port: Port) { match port.tag() { Tag::Var | Tag::Red => { unreachable!() @@ -218,7 +216,7 @@ impl AsBoxDef for ReadbackDef { (*def.data.tree.0) = var.clone(); (*other.data.0.tree.0) = var; } - Self::maybe_finish(DynNetMut::from(&mut *net), other.data.0.root); + 
Self::maybe_finish(net, other.data.0.root); } else if let Some(back) = def.data.host.lock().back.get(&port.addr()) { unsafe { *(def.data.tree.0) = Tree::Ref { nam: back.clone() } }; } else { @@ -267,38 +265,30 @@ impl AsBoxDef for ReadbackDef { net.link_wire_port(old.p2, def.data.with(rhs)); } } - Self::maybe_finish(DynNetMut::from(net), def.data.root); + Self::maybe_finish(net, def.data.root); } } -pub fn readback( - net: &mut Net, +pub fn readback( + net: &mut Net, host: Arc>, from: Trg, - f: impl FnOnce(DynNetMut, Tree) + Send + Sync + 'static, + f: impl FnOnce(&mut Net, Tree) + Send + Sync + 'static, ) { let root = UniqueTreePtr(Box::leak(Box::default())); - if M::LAZY { - let from = net.wire_to_trg(from); - net.normal_from(from.clone()); - let tree = host.lock().readback_tree(&from); - net.link_wire_port(from, Port::ERA); - f(DynNetMut::from(net), tree); - } else { - let closure: Box = Box::new(move |net| { - let root = unsafe { root.to_box() }; - f(net, *root); - }); + let closure: Box = Box::new(move |net| { + let root = unsafe { root.to_box() }; + f(net, *root); + }); - net.link_trg_port( - from, - Port::new_ref(Box::leak(BoxDef::new_boxed(LabSet::ALL, ReadbackDef { - root: Arc::new(closure), - host, - tree: root, - var_idx: Arc::new(AtomicUsize::from(0)), - }))), - ); - } + net.link_trg_port( + from, + Port::new_ref(Box::leak(BoxDef::new_boxed(LabSet::ALL, ReadbackDef { + root: Arc::new(closure), + host, + tree: root, + var_idx: Arc::new(AtomicUsize::from(0)), + }))), + ); } diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 549130d5..9b96d848 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -8,12 +8,11 @@ path = "src/runtime.rs" [dependencies] parking_lot = "0.12.2" -nohash-hasher = { version = "0.2.0", optional = true } hvm64-util = { path = "../util", default-features = false } [features] -std = ["hvm64-util/std", "dep:nohash-hasher"] +std = ["hvm64-util/std"] trace = [] [lints] diff --git a/runtime/src/addr.rs b/runtime/src/addr.rs index b0adf117..edcb7d4d 100644 --- a/runtime/src/addr.rs +++ b/runtime/src/addr.rs @@ -12,9 +12,6 @@ use super::*; #[must_use] pub struct Addr(pub usize); -#[cfg(feature = "std")] -impl nohash_hasher::IsEnabled for Addr {} - impl fmt::Debug for Addr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:012x?}", self.0) diff --git a/runtime/src/def.rs b/runtime/src/def.rs index 652c056e..939714d3 100644 --- a/runtime/src/def.rs +++ b/runtime/src/def.rs @@ -88,8 +88,7 @@ pub struct Def { /// interaction combinator whose label is not in this set. 
pub labs: LabSet, ty: TypeId, - call_strict: unsafe fn(*const Def, &mut Net, port: Port), - call_lazy: unsafe fn(*const Def, &mut Net, port: Port), + call: unsafe fn(*const Def, &mut Net, port: Port), pub data: T, } @@ -106,7 +105,7 @@ unsafe impl Send for Dynamic {} unsafe impl Sync for Dynamic {} pub trait AsDef: Any + Send + Sync { - unsafe fn call(slf: *const Def, net: &mut Net, port: Port); + unsafe fn call(slf: *const Def, net: &mut Net, port: Port); } impl Def { @@ -114,7 +113,7 @@ impl Def { where T: AsDef, { - Def { labs, ty: TypeId::of::(), call_strict: T::call::, call_lazy: T::call::, data } + Def { labs, ty: TypeId::of::(), call: T::call, data } } #[inline(always)] @@ -146,11 +145,8 @@ impl Def { unsafe { Def::downcast_mut_ptr(self).map(|x| &mut *x) } } #[inline(always)] - pub unsafe fn call(slf: *const Def, net: &mut Net, port: Port) { - match net.as_dyn_mut() { - DynNetMut::Strict(net) => ((*slf).call_strict)(slf as *const _, net, port), - DynNetMut::Lazy(net) => ((*slf).call_lazy)(slf as *const _, net, port), - } + pub unsafe fn call(slf: *const Def, net: &mut Net, port: Port) { + ((*slf).call)(slf as *const _, net, port) } } @@ -169,18 +165,13 @@ impl DerefMut for Def { } } -impl, Port) + Send + Sync + 'static, G: Fn(&mut Net, Port) + Send + Sync + 'static> AsDef - for (F, G) -{ - unsafe fn call(slf: *const Def, net: &mut Net, port: Port) { - match net.as_dyn_mut() { - DynNetMut::Strict(net) => ((*slf).data.0)(net, port), - DynNetMut::Lazy(net) => ((*slf).data.1)(net, port), - } +impl AsDef for F { + unsafe fn call(slf: *const Def, net: &mut Net, port: Port) { + ((*slf).data)(net, port) } } -impl<'a, M: Mode> Net<'a, M> { +impl<'a> Net<'a> { /// Expands a [`Ref`] node connected to `trg`. #[inline(never)] pub fn call(&mut self, port: Port, trg: Port) { @@ -223,7 +214,7 @@ impl InterpretedDef { } impl AsDef for InterpretedDef { - unsafe fn call(def: *const Def, net: &mut Net, trg: Port) { + unsafe fn call(def: *const Def, net: &mut Net, trg: Port) { let def = unsafe { &*def }; let def = &def.data; let instructions = &def.instr; diff --git a/runtime/src/dyn_net.rs b/runtime/src/dyn_net.rs deleted file mode 100644 index 52e2a49b..00000000 --- a/runtime/src/dyn_net.rs +++ /dev/null @@ -1,71 +0,0 @@ -use super::{Heap, Lazy, Mode, Net, Strict}; - -use crate::prelude::*; - -/// A [`Net`] whose mode is determined dynamically, at runtime. -/// -/// Use [`dispatch_dyn_net!`] to wrap operations on the inner net. -pub type DynNet<'a> = DynNetInner, Net<'a, Strict>>; - -/// A mutable reference to a [`Net`] whose mode is determined dynamically, at -/// runtime. -/// -/// Use [`dispatch_dyn_net!`] to wrap operations on the inner net. 
-pub type DynNetMut<'r, 'h> = DynNetInner<&'r mut Net<'h, Lazy>, &'r mut Net<'h, Strict>>; - -pub enum DynNetInner { - Lazy(L), - Strict(S), -} - -impl<'h> DynNet<'h> { - pub fn new(heap: &'h Heap, lazy: bool) -> Self { - if lazy { DynNet::Lazy(Net::new(heap)) } else { DynNet::Strict(Net::new(heap)) } - } -} - -impl<'r, 'h, M: Mode> From<&'r mut Net<'h, M>> for DynNetMut<'r, 'h> { - fn from(value: &'r mut Net<'h, M>) -> Self { - value.as_dyn_mut() - } -} - -impl<'h, M: Mode> Net<'h, M> { - pub fn as_dyn_mut(&mut self) -> DynNetMut<'_, 'h> { - if M::LAZY { - DynNetMut::Lazy(unsafe { mem::transmute(self) }) - } else { - DynNetMut::Strict(unsafe { mem::transmute(self) }) - } - } - - pub fn into_dyn(self) -> DynNet<'h> { - if M::LAZY { - DynNet::Lazy(unsafe { mem::transmute(self) }) - } else { - DynNet::Strict(unsafe { mem::transmute(self) }) - } - } -} - -#[macro_export] -macro_rules! dispatch_dyn_net { - ($pat:pat = $expr:expr => $body:expr) => { - match $expr { - $crate::DynNetInner::Lazy($pat) => $body, - $crate::DynNetInner::Strict($pat) => $body, - } - }; - ($net:ident => $body:expr) => { - dispatch_dyn_net! { $net = $net => $body } - }; - (mut $net:ident => $body:expr) => { - dispatch_dyn_net! { mut $net = $net => $body } - }; - (&$net:ident => $body:expr) => { - dispatch_dyn_net! { $net = &$net => $body } - }; - (&mut $net:ident => $body:expr) => { - dispatch_dyn_net! { $net = &mut $net => $body } - }; -} diff --git a/runtime/src/instruction.rs b/runtime/src/instruction.rs index 61d46d76..1c100846 100644 --- a/runtime/src/instruction.rs +++ b/runtime/src/instruction.rs @@ -107,20 +107,20 @@ impl fmt::Debug for TrgId { } } -impl<'a, M: Mode> Net<'a, M> { +impl<'a> Net<'a> { /// `trg ~ {#lab x y}` #[inline(always)] pub fn do_ctr(&mut self, lab: Lab, trg: Trg) -> (Trg, Trg) { let port = trg.target(); #[allow(clippy::overly_complex_bool_expr)] - if !M::LAZY && port.tag() == Ctr && port.lab() == lab { + if port.tag() == Ctr && port.lab() == lab { trace!(self.tracer, "fast"); self.free_trg(trg); let node = port.consume_node(); self.rwts.anni += 1; (Trg::wire(node.p1), Trg::wire(node.p2)) // TODO: fast copy? 
- } else if false && !M::LAZY && (port.is_num() || port.tag() == Ref && lab >= port.lab()) { + } else if false && (port.is_num() || port.tag() == Ref && lab >= port.lab()) { self.rwts.comm += 1; self.free_trg(trg); (Trg::port(port.clone()), Trg::port(port)) @@ -136,12 +136,12 @@ impl<'a, M: Mode> Net<'a, M> { pub fn do_op(&mut self, op: Op, trg: Trg) -> (Trg, Trg) { trace!(self.tracer, op, trg); let port = trg.target(); - if !M::LAZY && port.is_num() { + if port.is_num() { self.free_trg(trg); let n = self.create_node(Op, op.swap().into()); n.p1.wire().set_target(port); (Trg::port(n.p0), Trg::port(n.p2)) - } else if !M::LAZY && port == Port::ERA { + } else if port == Port::ERA { self.free_trg(trg); (Trg::port(Port::ERA), Trg::port(Port::ERA)) } else { @@ -155,14 +155,14 @@ impl<'a, M: Mode> Net<'a, M> { #[inline(always)] pub fn do_op_num(&mut self, op: Op, trg: Trg, rhs: Port) -> Trg { let port = trg.target(); - if !M::LAZY && port.is_num() { + if port.is_num() { self.rwts.oper += 1; self.free_trg(trg); let res = op.op(port.num(), rhs.num()); Trg::port(Port::new_num(if op.is_int() { Tag::Int } else { Tag::F32 }, res)) - } else if !M::LAZY && port == Port::ERA { + } else if port == Port::ERA { self.free_trg(trg); Trg::port(Port::ERA) } else { @@ -177,7 +177,7 @@ impl<'a, M: Mode> Net<'a, M> { #[inline(always)] pub fn do_mat(&mut self, trg: Trg) -> (Trg, Trg) { let port = trg.target(); - if !M::LAZY && port.tag() == Int { + if port.tag() == Int { self.rwts.oper += 1; self.free_trg(trg); let num = port.int(); @@ -192,7 +192,7 @@ impl<'a, M: Mode> Net<'a, M> { self.link_port_port(c2.p1, Port::new_int(num - 1)); (Trg::port(c1.p0), Trg::wire(self.create_wire_to(c2.p2))) } - } else if !M::LAZY && port == Port::ERA { + } else if port == Port::ERA { self.rwts.eras += 1; self.free_trg(trg); (Trg::port(Port::ERA), Trg::port(Port::ERA)) @@ -215,7 +215,7 @@ impl<'a, M: Mode> Net<'a, M> { #[allow(unused)] // TODO: emit this instruction pub fn do_mat_con_con(&mut self, trg: Trg, out: Trg) -> (Trg, Trg, Trg) { let port = trg.target(); - if !M::LAZY && trg.target().tag() == Int { + if trg.target().tag() == Int { self.rwts.oper += 1; self.free_trg(trg); let num = port.int(); @@ -224,7 +224,7 @@ impl<'a, M: Mode> Net<'a, M> { } else { (Trg::port(Port::ERA), Trg::port(Port::new_int(num - 1)), out) } - } else if !M::LAZY && port == Port::ERA { + } else if port == Port::ERA { self.link_trg_port(out, Port::ERA); (Trg::port(Port::ERA), Trg::port(Port::ERA), Trg::port(Port::ERA)) } else { diff --git a/runtime/src/interact.rs b/runtime/src/interact.rs index 973b3068..c841e0a6 100644 --- a/runtime/src/interact.rs +++ b/runtime/src/interact.rs @@ -1,6 +1,6 @@ use super::*; -impl<'a, M: Mode> Net<'a, M> { +impl<'a> Net<'a> { /// Performs an interaction between two connected principal ports. #[inline(always)] pub fn interact(&mut self, a: Port, b: Port) { diff --git a/runtime/src/linker.rs b/runtime/src/linker.rs index 61da3084..93823518 100644 --- a/runtime/src/linker.rs +++ b/runtime/src/linker.rs @@ -1,19 +1,5 @@ use super::*; -#[cfg(not(feature = "std"))] -use crate::prelude::Map as IntMap; -#[cfg(feature = "std")] -use nohash_hasher::IntMap; - -/// Stores extra data needed about the nodes when in lazy mode. (In strict mode, -/// this is unused.) -pub(super) struct Header { - /// the principal port of this node - pub(super) this: Port, - /// the port connected to the principal port of this node - pub(super) targ: Port, -} - /// Manages linking ports and wires within the net. 
/// /// When threads interfere, this uses the atomic linking algorithm described in @@ -21,25 +7,17 @@ pub(super) struct Header { /// /// Linking wires must be done atomically, but linking ports can be done /// non-atomically (because they must be locked). -pub struct Linker<'h, M: Mode> { +pub struct Linker<'h> { pub(super) allocator: Allocator<'h>, pub rwts: Rewrites, pub redexes: RedexQueue, - headers: IntMap, - _mode: PhantomData, } -deref!({<'h, M: Mode>} Linker<'h, M> => self.allocator: Allocator<'h>); +deref!({<'h, >} Linker<'h> => self.allocator: Allocator<'h>); -impl<'h, M: Mode> Linker<'h, M> { +impl<'h> Linker<'h> { pub fn new(heap: &'h Heap) -> Self { - Linker { - allocator: Allocator::new(heap), - redexes: RedexQueue::default(), - rwts: Default::default(), - headers: Default::default(), - _mode: PhantomData, - } + Linker { allocator: Allocator::new(heap), redexes: RedexQueue::default(), rwts: Default::default() } } /// Links two ports. @@ -94,7 +72,7 @@ impl<'h, M: Mode> Linker<'h, M> { debug_assert!(!(a.is(Tag::Var) || a.is(Tag::Red) || b.is(Tag::Var) || b.is(Tag::Red))); if a.is_skippable() && b.is_skippable() { self.rwts.eras += 1; - } else if !M::LAZY { + } else { // Prioritize redexes that do not allocate memory, // to prevent OOM errors that can be avoided // by reducing redexes in a different order (see #91) @@ -103,9 +81,6 @@ impl<'h, M: Mode> Linker<'h, M> { } else { self.redexes.slow.push((a, b)); } - } else { - self.set_header(a.clone(), b.clone()); - self.set_header(b.clone(), a.clone()); } } @@ -116,8 +91,6 @@ impl<'h, M: Mode> Linker<'h, M> { trace!(self, a_port, b_port); if a_port.is(Tag::Var) { a_port.wire().set_target(b_port); - } else if M::LAZY { - self.set_header(a_port, b_port); } } @@ -150,9 +123,6 @@ impl<'h, M: Mode> Linker<'h, M> { } } else { self.free_wire(a_wire); - if M::LAZY { - self.set_header(a_port, b_port); - } } } @@ -310,7 +280,7 @@ impl Trg { } } -impl<'h, M: Mode> Linker<'h, M> { +impl<'h> Linker<'h> { /// Links a `Trg` to a port, delegating to the appropriate method based on the /// type of `a`. #[inline(always)] @@ -332,27 +302,6 @@ impl<'h, M: Mode> Linker<'h, M> { (false, false) => self.link_port_port(a.as_port(), b.as_port()), } } - - pub(super) fn get_header(&self, addr: Addr) -> &Header { - assert!(M::LAZY); - &self.headers[&addr] - } - - pub(super) fn set_header(&mut self, ptr: Port, trg: Port) { - assert!(M::LAZY); - trace!(self, ptr, trg); - if ptr.is_full_node() { - self.headers.insert(ptr.addr(), Header { this: ptr, targ: trg }); - } - } - - pub(super) fn get_target_full(&self, port: Port) -> Port { - assert!(M::LAZY); - if !port.is_principal() { - return port.wire().load_target(); - } - self.headers[&port.addr()].targ.clone() - } } #[derive(Debug, Default)] diff --git a/runtime/src/net.rs b/runtime/src/net.rs index 67365f3d..8ca66e34 100644 --- a/runtime/src/net.rs +++ b/runtime/src/net.rs @@ -3,17 +3,17 @@ use super::*; use mem::MaybeUninit; /// An interaction combinator net. -pub struct Net<'a, M: Mode> { - pub(super) linker: Linker<'a, M>, +pub struct Net<'a> { + pub(super) linker: Linker<'a>, pub tid: usize, // thread id pub tids: usize, // thread count pub trgs: Box<[MaybeUninit]>, pub root: Wire, } -deref!({<'a, M: Mode>} Net<'a, M> => self.linker: Linker<'a, M>); +deref!({<'a, >} Net<'a> => self.linker: Linker<'a>); -impl<'h, M: Mode> Net<'h, M> { +impl<'h> Net<'h> { /// Creates an empty net with a given heap. 
pub fn new(heap: &'h Heap) -> Self { let mut net = Net::new_with_root(heap, Wire(ptr::null())); @@ -31,14 +31,13 @@ impl<'h, M: Mode> Net<'h, M> { } } -impl<'a, M: Mode> Net<'a, M> { +impl<'a> Net<'a> { /// Reduces at most `limit` redexes. /// /// If normalized, returns `Some(num_redexes)`. /// If stopped because the limit was reached, returns `None`. #[inline(always)] pub fn reduce(&mut self, limit: usize) -> Option { - assert!(!M::LAZY); let mut count = 0; while let Some((a, b)) = self.redexes.pop() { @@ -51,82 +50,18 @@ impl<'a, M: Mode> Net<'a, M> { Some(count) } - // Lazy mode weak head normalizer - #[inline(always)] - pub fn weak_normal(&mut self, mut prev: Port, root: Wire) -> Port { - assert!(M::LAZY); - - let mut path: Vec = vec![]; - - loop { - trace!(self.tracer, prev); - // Load ptrs - let next = self.get_target_full(prev.clone()); - trace!(self.tracer, next); - - // If next is root, stop. - if next == Port::new_var(root.addr()) || next == Port::new_var(self.root.addr()) { - break; - } - - // If next is a main port... - if next.is_principal() { - // If prev is a main port, reduce the active pair. - if prev.is_principal() { - self.interact(next, prev.clone()); - prev = path.pop().unwrap(); - continue; - // Otherwise, if it is a ref, expand it. - } else if next.tag() == Ref && next != Port::ERA { - self.call(next, prev.clone()); - continue; - // Otherwise, we're done. - } else { - break; - } - } - - // If next is an aux port, pass through. - let main = self.get_header(next.addr().left_half()); - path.push(prev); - prev = main.this.clone(); - } - - self.get_target_full(prev) - } - - pub fn normal_from(&mut self, root: Wire) { - assert!(M::LAZY); - let mut visit = vec![Port::new_var(root.addr())]; - while let Some(prev) = visit.pop() { - trace!(self.tracer, "visit", prev); - //println!("normal {} | {}", prev.view(), self.rewrites()); - let next = self.weak_normal(prev, root.clone()); - trace!(self.tracer, "got", next); - if next.is_full_node() { - visit.push(Port::new_var(next.addr())); - visit.push(Port::new_var(next.addr().other_half())); - } - } - } - /// Reduces a net to normal form. pub fn normal(&mut self) { - if M::LAZY { - self.normal_from(self.root.clone()); - } else { - self.expand(); - while !self.redexes.is_empty() { - self.reduce(usize::MAX); - } + self.expand(); + while !self.redexes.is_empty() { + self.reduce(usize::MAX); } } } -impl<'h, M: Mode> Net<'h, M> { +impl<'h> Net<'h> { /// Expands [`Tag::Ref`] nodes in the tree connected to `root`. 
pub fn expand(&mut self) { - assert!(!M::LAZY); let (new_root, out_port) = self.create_wire(); let old_root = mem::replace(&mut self.root, new_root); self.link_wire_port(old_root, ExpandDef::new(out_port)); @@ -144,7 +79,7 @@ impl ExpandDef { } impl AsDef for ExpandDef { - unsafe fn call(def: *const Def, net: &mut Net, port: Port) { + unsafe fn call(def: *const Def, net: &mut Net, port: Port) { if port.tag() == Tag::Ref && port != Port::ERA { let other: *const Def = port.addr().def() as *const _; if let Some(other) = Def::downcast_ptr::(other) { diff --git a/runtime/src/node.rs b/runtime/src/node.rs index 7fdec7e3..1e87668e 100644 --- a/runtime/src/node.rs +++ b/runtime/src/node.rs @@ -31,7 +31,7 @@ pub struct CreatedNode { pub p2: Port, } -impl<'a, M: Mode> Net<'a, M> { +impl<'a> Net<'a> { #[inline(always)] pub fn create_node(&mut self, tag: Tag, lab: Lab) -> CreatedNode { let addr = self.alloc(); diff --git a/runtime/src/parallel.rs b/runtime/src/parallel.rs index 5c7babb8..5517b489 100644 --- a/runtime/src/parallel.rs +++ b/runtime/src/parallel.rs @@ -7,7 +7,7 @@ use atomic::AtomicUsize; use super::*; -impl<'h, M: Mode> Net<'h, M> { +impl<'h> Net<'h> { /// Forks the net into `tids` child nets, for parallel operation. pub fn fork(&mut self, tids: usize) -> impl Iterator + '_ { let redexes_len = self.linker.redexes.len(); @@ -33,19 +33,17 @@ impl<'h, M: Mode> Net<'h, M> { // Evaluates a term to normal form in parallel pub fn parallel_normal(&mut self) { - assert!(!M::LAZY); - self.expand(); const SHARE_LIMIT: usize = 1 << 12; // max share redexes per split const LOCAL_LIMIT: usize = 1 << 18; // max local rewrites per epoch // Local thread context - struct ThreadContext<'a, M: Mode> { + struct ThreadContext<'a> { tid: usize, // thread id tlog2: usize, // log2 of thread count tick: usize, // current tick - net: Net<'a, M>, // thread's own net object + net: Net<'a>, // thread's own net object delta: &'a AtomicRewrites, // global delta rewrites share: &'a Vec<(AtomicU64, AtomicU64)>, // global share buffer rlens: &'a Vec, // global redex lengths (only counting shareable ones) @@ -85,7 +83,7 @@ impl<'h, M: Mode> Net<'h, M> { // Main reduction loop #[inline(always)] - fn main(ctx: &mut ThreadContext) { + fn main(ctx: &mut ThreadContext) { loop { reduce(ctx); if count(ctx) == 0 { @@ -97,7 +95,7 @@ impl<'h, M: Mode> Net<'h, M> { // Reduce redexes locally, then share with target #[inline(always)] - fn reduce(ctx: &mut ThreadContext) { + fn reduce(ctx: &mut ThreadContext) { loop { ctx.net.reduce(LOCAL_LIMIT); if count(ctx) == 0 { @@ -111,7 +109,7 @@ impl<'h, M: Mode> Net<'h, M> { // Count total redexes (and populate 'rlens') #[inline(always)] - fn count(ctx: &mut ThreadContext) -> usize { + fn count(ctx: &mut ThreadContext) -> usize { ctx.barry.wait(); ctx.total.store(0, Relaxed); ctx.barry.wait(); @@ -123,7 +121,7 @@ impl<'h, M: Mode> Net<'h, M> { // Share redexes with target thread #[inline(always)] - fn split(ctx: &mut ThreadContext, plog2: usize) { + fn split(ctx: &mut ThreadContext, plog2: usize) { unsafe { let side = (ctx.tid >> (plog2 - 1 - (ctx.tick % plog2))) & 1; let shift = (1 << (plog2 - 1)) >> (ctx.tick % plog2); diff --git a/runtime/src/runtime.rs b/runtime/src/runtime.rs index ffe1e31e..7b35dcf7 100644 --- a/runtime/src/runtime.rs +++ b/runtime/src/runtime.rs @@ -34,7 +34,6 @@ use core::{ alloc::Layout, any::{Any, TypeId}, hint::unreachable_unchecked, - marker::PhantomData, mem::size_of, ops::{Add, AddAssign, Deref, DerefMut}, }; @@ -51,7 +50,6 @@ use Tag::*; mod addr; mod allocator; 
mod def; -mod dyn_net; mod instruction; mod interact; mod linker; @@ -65,7 +63,6 @@ mod wire; pub use addr::*; pub use allocator::*; pub use def::*; -pub use dyn_net::*; pub use instruction::*; pub use linker::*; pub use net::*; @@ -75,30 +72,6 @@ pub use wire::*; pub type Lab = u16; -/// The runtime mode is represented with a generic such that, instead of -/// repeatedly branching on the mode at runtime, the branch can happen at the -/// top-most level, and delegate to monomorphized functions specialized for each -/// particular mode. -/// -/// This trait is `unsafe` as it may only be implemented by [`Strict`] and -/// [`Lazy`]. -pub unsafe trait Mode: Send + Sync + 'static { - const LAZY: bool; -} - -/// In strict mode, all active pairs are expanded. -pub struct Strict; -unsafe impl Mode for Strict { - const LAZY: bool = false; -} - -/// In lazy mode, only active pairs that are reached from a walk from the root -/// port are expanded. -pub struct Lazy; -unsafe impl Mode for Lazy { - const LAZY: bool = true; -} - /// Tracks the number of rewrites, categorized by type. #[derive(Clone, Copy, Debug, Default)] pub struct Rewrites { diff --git a/src/args.rs b/src/args.rs index 8d62a1cc..f7c2bd14 100644 --- a/src/args.rs +++ b/src/args.rs @@ -27,14 +27,6 @@ pub struct RuntimeOpts { #[arg(short = '1', long = "single")] pub single_core: bool, - /// Lazy mode. - /// - /// Lazy mode only expands references that are reachable - /// by a walk from the root of the net. This leads to a dramatic slowdown, - /// but allows running programs that would expand indefinitely otherwise. - #[arg(short, long = "lazy")] - pub lazy_mode: bool, - /// How much memory to allocate on startup. /// /// Supports abbreviations such as '4G' or '400M'. diff --git a/src/compile.rs b/src/compile.rs index c6909b9b..b71515a4 100644 --- a/src/compile.rs +++ b/src/compile.rs @@ -122,7 +122,7 @@ fn compile_struct(code: &mut String, host: &Host, rust_name: &str, def: &Def(slf: *const Def, net: &mut Net, port: Port) {{")?; + writeln!(code, " unsafe fn call(slf: *const Def, net: &mut Net, port: Port) {{")?; writeln!(code, " let slf = unsafe {{ &*slf }};")?; writeln!(code, " let t0 = Trg::port(port);")?; diff --git a/src/compile/include_files.rs b/src/compile/include_files.rs index 8a9dbfc9..fa70c973 100644 --- a/src/compile/include_files.rs +++ b/src/compile/include_files.rs @@ -95,7 +95,6 @@ hvm64-runtime = { path = "../runtime", default-features = false } addr allocator def - dyn_net instruction interact linker diff --git a/src/main.rs b/src/main.rs index 6e6ebc2d..eecacd59 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,7 +28,7 @@ use hvm64_host::{ stdlib::{create_host, insert_stdlib}, DefRef, Host, }; -use hvm64_runtime::{dispatch_dyn_net, Def, DynNet, Heap, Mode, Port, Trg}; +use hvm64_runtime::{Def, Heap, Port, Trg}; use hvm64_transform::Transform; fn main() { @@ -184,25 +184,23 @@ fn load_dylibs(host: Arc>, include: &[PathBuf]) { fn reduce_exprs(host: Arc>, exprs: &[Net], opts: &RuntimeOpts) { let heap = Heap::new(opts.memory).expect("memory allocation failed"); for expr in exprs { - let mut net = DynNet::new(&heap, opts.lazy_mode); - dispatch_dyn_net!(&mut net => { - host.lock().encode_net(net, Trg::port(Port::new_var(net.root.addr())), expr); - let start_time = Instant::now(); - if opts.single_core { - net.normal(); - } else { - net.parallel_normal(); - } - let elapsed = start_time.elapsed(); - println!("{}", host.lock().readback(net)); - if opts.show_stats { - print_stats(net, elapsed); - } - }); + let net = &mut 
hvm64_runtime::Net::new(&heap); + host.lock().encode_net(net, Trg::port(Port::new_var(net.root.addr())), expr); + let start_time = Instant::now(); + if opts.single_core { + net.normal(); + } else { + net.parallel_normal(); + } + let elapsed = start_time.elapsed(); + println!("{}", host.lock().readback(net)); + if opts.show_stats { + print_stats(net, elapsed); + } } } -fn print_stats(net: &hvm64_runtime::Net, elapsed: Duration) { +fn print_stats(net: &hvm64_runtime::Net, elapsed: Duration) { eprintln!("RWTS : {:>15}", pretty_num(net.rwts.total())); eprintln!("- ANNI : {:>15}", pretty_num(net.rwts.anni)); eprintln!("- COMM : {:>15}", pretty_num(net.rwts.comm)); diff --git a/tests/cli.rs b/tests/cli.rs index c820dfdb..7e06e36b 100644 --- a/tests/cli.rs +++ b/tests/cli.rs @@ -209,7 +209,7 @@ fn test_apply_tree() { } let host = Host::default(); - let mut rnet = run::Net::::new(&area); + let mut rnet = run::Net::new(&area); let root_port = run::Trg::port(run::Port::new_var(rnet.root.addr())); host.encode_net(&mut rnet, root_port, &fun); rnet.normal(); diff --git a/tests/loaders.rs b/tests/loaders.rs index 1101b078..533c17ce 100644 --- a/tests/loaders.rs +++ b/tests/loaders.rs @@ -29,7 +29,7 @@ pub fn normal_with(book: Book, mem: Option, entry_point: &str) -> (run::R let area = run::Heap::new(mem).unwrap(); let host = create_host(&book); - let mut rnet = run::Net::::new(&area); + let mut rnet = run::Net::new(&area); rnet.boot(&host.lock().defs[entry_point]); rnet.normal(); diff --git a/tests/tests.rs b/tests/tests.rs index 708058ea..26446e0c 100644 --- a/tests/tests.rs +++ b/tests/tests.rs @@ -21,7 +21,7 @@ use serial_test::serial; fn execute_host(host: Arc>) -> Option<(run::Rewrites, Net)> { let heap = run::Heap::new(None).unwrap(); - let mut net = run::Net::::new(&heap); + let mut net = run::Net::new(&heap); // The host is locked inside this block. { let lock = host.lock(); diff --git a/transform/src/pre_reduce.rs b/transform/src/pre_reduce.rs index 785c695a..fd5f1885 100644 --- a/transform/src/pre_reduce.rs +++ b/transform/src/pre_reduce.rs @@ -20,7 +20,7 @@ use hvm64_host::{ stdlib::{AsHostedDef, HostedDef}, DefRef, Host, }; -use hvm64_runtime::{Def, Heap, InterpretedDef, LabSet, Mode, Port, Rewrites, Strict}; +use hvm64_runtime::{Def, Heap, InterpretedDef, LabSet, Port, Rewrites}; use hvm64_util::maybe_grow; use alloc::sync::Arc; @@ -93,7 +93,7 @@ enum SeenState { struct InertDef(Arc>>); impl AsHostedDef for InertDef { - fn call(def: &Def, _: &mut hvm64_runtime::Net, port: Port) { + fn call(def: &Def, _: &mut hvm64_runtime::Net, port: Port) { def.data.0.lock().push((Port::new_ref(def), port)); } } @@ -139,7 +139,7 @@ impl<'a> State<'a> { // First, pre-reduce all nets referenced by this net by walking the tree self.visit_net(self.book.get(nam).unwrap()); - let mut rt = hvm64_runtime::Net::::new(self.area); + let mut rt = hvm64_runtime::Net::new(self.area); rt.boot(self.host.defs.get(nam).expect("No function.")); let n_reduced = rt.reduce(self.max_rwts as usize);
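After this change the runtime exposes a single concrete `Net<'_>` type, so callers no longer go through `DynNet` / `dispatch_dyn_net!`. A minimal sketch of driving it end to end, modeled on the new `reduce_exprs` in `src/main.rs` and the test setup in `tests/cli.rs` (the function name, the `&Host` parameter, and the `single_core` flag are illustrative; `Heap`, `Net`, `Trg`, `Port`, `encode_net`, `normal`, `parallel_normal`, and `readback` are taken from the diff):

use hvm64_host::Host;
use hvm64_runtime::{Heap, Net, Port, Trg};

fn run_one(host: &Host, expr: &hvm64_ast::Net, single_core: bool) {
  // `Heap::new(None)` mirrors the tests; pass a size to preallocate more memory.
  let heap = Heap::new(None).expect("memory allocation failed");
  // No mode parameter: one concrete net type for both single- and multi-core runs.
  let mut net = Net::new(&heap);
  host.encode_net(&mut net, Trg::port(Port::new_var(net.root.addr())), expr);
  if single_core { net.normal() } else { net.parallel_normal() }
  println!("{}", host.readback(&net));
}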
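Similarly, `Def` now carries a single call function instead of the `(call_strict, call_lazy)` pair, and the new blanket `impl AsDef for F` (its exact `Fn(&mut Net, Port)` bounds are elided in the rendered diff) lets a bare `fn` act as a native definition. A sketch of a second native def in the style of `IDENTITY` / `call_identity` from `host/src/stdlib.rs`; the `@SWAP` agent itself is purely illustrative:

use hvm64_runtime::{Def, LabSet, Net, Port, Trg};

/// `@SWAP = ((a b) (b a))`: hypothetical example agent.
fn call_swap(net: &mut Net, port: Port) {
  // Unfold the argument as a label-0 constructor, then each of its branches.
  let (x, y) = net.do_ctr(0, Trg::port(port));
  let (a, b) = net.do_ctr(0, x);
  let (c, d) = net.do_ctr(0, y);
  // `(a b)` on one side, `(b a)` on the other: cross-link the aux ports.
  net.link_trg(a, d);
  net.link_trg(b, c);
}

// Same `LabSet` as IDENTITY, since only label-0 constructors are used.
pub const SWAP: *const Def = const { &Def::new(LabSet::from_bits(&[1]), call_swap) }.upcast();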