From 6bffc673ab34e81c2cb8505d1c1e19ac92549b53 Mon Sep 17 00:00:00 2001
From: Alex Crichton <alex@alexcrichton.com>
Date: Thu, 31 Oct 2024 11:34:58 -0700
Subject: [PATCH] Remove the `wasmtime_environ::MemoryPlan` type

This is the equivalent of #9530 for memories. The goal of this commit is
to eventually remove the abstraction layer of `MemoryPlan` and
`MemoryStyle` in favor of directly reading the configuration in
`Tunables`. The prediction is that working directly with configured
values will be simpler than maintaining a layer of abstraction between
the configuration and the runtime, one which has to be evolved
independently to capture how the configuration is interpreted.

As with #9530 my plan is to eventually remove the `MemoryStyle` type
itself, but that's a larger change, so it's deferred to a future PR.
---
 crates/cranelift/src/func_environ.rs          | 302 ++++++++----------
 crates/environ/src/compile/module_environ.rs  |  29 +-
 .../environ/src/component/translate/inline.rs |   4 +-
 crates/environ/src/module.rs                  |  47 +--
 crates/environ/src/vmoffsets.rs               |   8 +-
 crates/wasmtime/src/runtime/externals.rs      |   2 +-
 crates/wasmtime/src/runtime/memory.rs         |  24 +-
 crates/wasmtime/src/runtime/module.rs         |   6 +-
 crates/wasmtime/src/runtime/store.rs          |   2 +-
 .../wasmtime/src/runtime/trampoline/memory.rs |  36 +--
 crates/wasmtime/src/runtime/vm/cow.rs         | 108 ++++---
 .../wasmtime/src/runtime/vm/debug_builtins.rs |   4 +-
 crates/wasmtime/src/runtime/vm/export.rs      |   4 +-
 crates/wasmtime/src/runtime/vm/instance.rs    |  24 +-
 .../src/runtime/vm/instance/allocator.rs      |  34 +-
 .../vm/instance/allocator/on_demand.rs        |   8 +-
 .../runtime/vm/instance/allocator/pooling.rs  |   9 +-
 .../instance/allocator/pooling/memory_pool.rs |  38 +--
 crates/wasmtime/src/runtime/vm/memory.rs      |  71 ++--
 .../src/runtime/vm/threads/shared_memory.rs   |  19 +-
 winch/codegen/src/codegen/env.rs              |  48 ++-
 winch/codegen/src/isa/aarch64/mod.rs          |   1 +
 winch/codegen/src/isa/x64/mod.rs              |   1 +
 23 files changed, 392 insertions(+), 437 deletions(-)

diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs
index 87009b538b76..4eabb08fa632 100644
--- a/crates/cranelift/src/func_environ.rs
+++ b/crates/cranelift/src/func_environ.rs
@@ -20,7 +20,7 @@ use std::mem;
 use wasmparser::{Operator, WasmFeatures};
 use wasmtime_environ::{
     BuiltinFunctionIndex, DataIndex, ElemIndex, EngineOrModuleTypeIndex, FuncIndex, GlobalIndex,
-    IndexType, Memory, MemoryIndex, MemoryPlan, MemoryStyle, Module, ModuleInternedTypeIndex,
+    IndexType, Memory, MemoryIndex, MemoryStyle, Module, ModuleInternedTypeIndex,
     ModuleTranslation, ModuleTypesBuilder, PtrSize, Table, TableIndex, TableStyle, Tunables,
     TypeConvert, TypeIndex, VMOffsets, WasmCompositeType, WasmFuncType, WasmHeapTopType,
     WasmHeapType, WasmRefType, WasmResult, WasmValType,
@@ -696,7 +696,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> {

     /// Get the Memory for the given index.
     fn memory(&self, index: MemoryIndex) -> Memory {
-        self.module.memory_plans[index].memory
+        self.module.memories[index]
     }

     /// Get the Table for the given index.
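
To make the shape of the change concrete before the remaining hunks: the
pattern applied throughout is to keep the raw `wasmtime_environ::Memory`
type and re-derive the style and guard sizes from `Tunables` at each
point of use, rather than consulting fields stored on a `MemoryPlan`. A
minimal sketch of that derivation (the `derived_limits` helper is
illustrative only, not part of the patch):

    use wasmtime_environ::{Memory, MemoryStyle, Tunables};

    // What `MemoryPlan::for_memory` used to precompute and store, now
    // recomputed on demand wherever it's needed.
    fn derived_limits(memory: Memory, tunables: &Tunables) -> (MemoryStyle, u64, u64) {
        // `MemoryStyle::for_memory` returns the chosen style plus the
        // offset-guard size implied by the tunables.
        let (style, offset_guard_size) = MemoryStyle::for_memory(memory, tunables);
        // `pre_guard_size` was the other derived field on `MemoryPlan`;
        // callers such as `MmapMemory::new` now compute it inline.
        let pre_guard_size = if tunables.guard_before_linear_memory {
            offset_guard_size
        } else {
            0
        };
        (style, offset_guard_size, pre_guard_size)
    }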
@@ -2323,25 +2323,20 @@ impl<'module_environment> crate::translate::FuncEnvironment fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult { let pointer_type = self.pointer_type(); - let is_shared = self.module.memory_plans[index].memory.shared; - - let min_size = self.module.memory_plans[index] - .memory - .minimum_byte_size() - .unwrap_or_else(|_| { - // The only valid Wasm memory size that won't fit in a 64-bit - // integer is the maximum memory64 size (2^64) which is one - // larger than `u64::MAX` (2^64 - 1). In this case, just say the - // minimum heap size is `u64::MAX`. - debug_assert_eq!(self.module.memory_plans[index].memory.limits.min, 1 << 48); - debug_assert_eq!(self.module.memory_plans[index].memory.page_size(), 1 << 16); - u64::MAX - }); + let memory = self.module.memories[index]; + let is_shared = memory.shared; + + let min_size = memory.minimum_byte_size().unwrap_or_else(|_| { + // The only valid Wasm memory size that won't fit in a 64-bit + // integer is the maximum memory64 size (2^64) which is one + // larger than `u64::MAX` (2^64 - 1). In this case, just say the + // minimum heap size is `u64::MAX`. + debug_assert_eq!(memory.limits.min, 1 << 48); + debug_assert_eq!(memory.page_size(), 1 << 16); + u64::MAX + }); - let max_size = self.module.memory_plans[index] - .memory - .maximum_byte_size() - .ok(); + let max_size = memory.maximum_byte_size().ok(); let (ptr, base_offset, current_length_offset, ptr_memtype) = { let vmctx = self.vmctx(func); @@ -2395,151 +2390,138 @@ impl<'module_environment> crate::translate::FuncEnvironment } }; - let page_size_log2 = self.module.memory_plans[index].memory.page_size_log2; + let page_size_log2 = memory.page_size_log2; // If we have a declared maximum, we can make this a "static" heap, which is // allocated up front and never moved. - let (offset_guard_size, heap_style, readonly_base, base_fact, memory_type) = - match self.module.memory_plans[index] { - MemoryPlan { - style: MemoryStyle::Dynamic { .. }, - offset_guard_size, - pre_guard_size: _, - memory: _, - } => { - let heap_bound = func.create_global_value(ir::GlobalValueData::Load { - base: ptr, - offset: Offset32::new(current_length_offset), - global_type: pointer_type, - flags: MemFlags::trusted(), - }); + let (style, offset_guard_size) = MemoryStyle::for_memory(memory, self.tunables); + let (heap_style, readonly_base, base_fact, memory_type) = match style { + MemoryStyle::Dynamic { .. } => { + let heap_bound = func.create_global_value(ir::GlobalValueData::Load { + base: ptr, + offset: Offset32::new(current_length_offset), + global_type: pointer_type, + flags: MemFlags::trusted(), + }); - let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype { - // Create a memtype representing the untyped memory region. - let data_mt = func.create_memory_type(ir::MemoryTypeData::DynamicMemory { - gv: heap_bound, - size: offset_guard_size, - }); - // This fact applies to any pointer to the start of the memory. - let base_fact = ir::Fact::dynamic_base_ptr(data_mt); - // This fact applies to the length. - let length_fact = ir::Fact::global_value( - u16::try_from(self.isa.pointer_type().bits()).unwrap(), - heap_bound, - ); - // Create a field in the vmctx for the base pointer. 
- match &mut func.memory_types[ptr_memtype] { - ir::MemoryTypeData::Struct { size, fields } => { - let base_offset = u64::try_from(base_offset).unwrap(); - fields.push(ir::MemoryTypeField { - offset: base_offset, - ty: self.isa.pointer_type(), - // Read-only field from the PoV of PCC checks: - // don't allow stores to this field. (Even if - // it is a dynamic memory whose base can - // change, that update happens inside the - // runtime, not in generated code.) - readonly: true, - fact: Some(base_fact.clone()), - }); - let current_length_offset = - u64::try_from(current_length_offset).unwrap(); - fields.push(ir::MemoryTypeField { - offset: current_length_offset, - ty: self.isa.pointer_type(), - // As above, read-only; only the runtime modifies it. - readonly: true, - fact: Some(length_fact), - }); - - let pointer_size = u64::from(self.isa.pointer_type().bytes()); - let fields_end = std::cmp::max( - base_offset + pointer_size, - current_length_offset + pointer_size, - ); - *size = std::cmp::max(*size, fields_end); - } - _ => { - panic!("Bad memtype"); - } + let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype { + // Create a memtype representing the untyped memory region. + let data_mt = func.create_memory_type(ir::MemoryTypeData::DynamicMemory { + gv: heap_bound, + size: offset_guard_size, + }); + // This fact applies to any pointer to the start of the memory. + let base_fact = ir::Fact::dynamic_base_ptr(data_mt); + // This fact applies to the length. + let length_fact = ir::Fact::global_value( + u16::try_from(self.isa.pointer_type().bits()).unwrap(), + heap_bound, + ); + // Create a field in the vmctx for the base pointer. + match &mut func.memory_types[ptr_memtype] { + ir::MemoryTypeData::Struct { size, fields } => { + let base_offset = u64::try_from(base_offset).unwrap(); + fields.push(ir::MemoryTypeField { + offset: base_offset, + ty: self.isa.pointer_type(), + // Read-only field from the PoV of PCC checks: + // don't allow stores to this field. (Even if + // it is a dynamic memory whose base can + // change, that update happens inside the + // runtime, not in generated code.) + readonly: true, + fact: Some(base_fact.clone()), + }); + let current_length_offset = + u64::try_from(current_length_offset).unwrap(); + fields.push(ir::MemoryTypeField { + offset: current_length_offset, + ty: self.isa.pointer_type(), + // As above, read-only; only the runtime modifies it. + readonly: true, + fact: Some(length_fact), + }); + + let pointer_size = u64::from(self.isa.pointer_type().bytes()); + let fields_end = std::cmp::max( + base_offset + pointer_size, + current_length_offset + pointer_size, + ); + *size = std::cmp::max(*size, fields_end); } - // Apply a fact to the base pointer. - (Some(base_fact), Some(data_mt)) - } else { - (None, None) - }; - - ( - offset_guard_size, - HeapStyle::Dynamic { - bound_gv: heap_bound, - }, - false, - base_fact, - data_mt, - ) - } - MemoryPlan { - style: - MemoryStyle::Static { - byte_reservation: bound_bytes, - }, - offset_guard_size, - pre_guard_size: _, - memory: _, - } => { - let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype { - // Create a memtype representing the untyped memory region. - let data_mt = func.create_memory_type(ir::MemoryTypeData::Memory { - size: bound_bytes - .checked_add(offset_guard_size) - .expect("Memory plan has overflowing size plus guard"), - }); - // This fact applies to any pointer to the start of the memory. 
- let base_fact = Fact::Mem { - ty: data_mt, - min_offset: 0, - max_offset: 0, - nullable: false, - }; - // Create a field in the vmctx for the base pointer. - match &mut func.memory_types[ptr_memtype] { - ir::MemoryTypeData::Struct { size, fields } => { - let offset = u64::try_from(base_offset).unwrap(); - fields.push(ir::MemoryTypeField { - offset, - ty: self.isa.pointer_type(), - // Read-only field from the PoV of PCC checks: - // don't allow stores to this field. (Even if - // it is a dynamic memory whose base can - // change, that update happens inside the - // runtime, not in generated code.) - readonly: true, - fact: Some(base_fact.clone()), - }); - *size = std::cmp::max( - *size, - offset + u64::from(self.isa.pointer_type().bytes()), - ); - } - _ => { - panic!("Bad memtype"); - } + _ => { + panic!("Bad memtype"); } - // Apply a fact to the base pointer. - (Some(base_fact), Some(data_mt)) - } else { - (None, None) + } + // Apply a fact to the base pointer. + (Some(base_fact), Some(data_mt)) + } else { + (None, None) + }; + + ( + HeapStyle::Dynamic { + bound_gv: heap_bound, + }, + false, + base_fact, + data_mt, + ) + } + MemoryStyle::Static { + byte_reservation: bound_bytes, + } => { + let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype { + // Create a memtype representing the untyped memory region. + let data_mt = func.create_memory_type(ir::MemoryTypeData::Memory { + size: bound_bytes + .checked_add(offset_guard_size) + .expect("Memory plan has overflowing size plus guard"), + }); + // This fact applies to any pointer to the start of the memory. + let base_fact = Fact::Mem { + ty: data_mt, + min_offset: 0, + max_offset: 0, + nullable: false, }; - ( - offset_guard_size, - HeapStyle::Static { bound: bound_bytes }, - true, - base_fact, - data_mt, - ) - } - }; + // Create a field in the vmctx for the base pointer. + match &mut func.memory_types[ptr_memtype] { + ir::MemoryTypeData::Struct { size, fields } => { + let offset = u64::try_from(base_offset).unwrap(); + fields.push(ir::MemoryTypeField { + offset, + ty: self.isa.pointer_type(), + // Read-only field from the PoV of PCC checks: + // don't allow stores to this field. (Even if + // it is a dynamic memory whose base can + // change, that update happens inside the + // runtime, not in generated code.) + readonly: true, + fact: Some(base_fact.clone()), + }); + *size = std::cmp::max( + *size, + offset + u64::from(self.isa.pointer_type().bytes()), + ); + } + _ => { + panic!("Bad memtype"); + } + } + // Apply a fact to the base pointer. 
+ (Some(base_fact), Some(data_mt)) + } else { + (None, None) + }; + ( + HeapStyle::Static { bound: bound_bytes }, + true, + base_fact, + data_mt, + ) + } + }; let mut flags = MemFlags::trusted().with_checked(); if readonly_base { @@ -2756,7 +2738,7 @@ impl<'module_environment> crate::translate::FuncEnvironment ) -> WasmResult { let pointer_type = self.pointer_type(); let vmctx = self.vmctx(&mut pos.func); - let is_shared = self.module.memory_plans[index].memory.shared; + let is_shared = self.module.memories[index].shared; let base = pos.ins().global_value(pointer_type, vmctx); let current_length_in_bytes = match self.module.defined_memory_index(index) { Some(def_index) => { @@ -2818,7 +2800,7 @@ impl<'module_environment> crate::translate::FuncEnvironment } }; - let page_size_log2 = i64::from(self.module.memory_plans[index].memory.page_size_log2); + let page_size_log2 = i64::from(self.module.memories[index].page_size_log2); let current_length_in_pages = pos.ins().ushr_imm(current_length_in_bytes, page_size_log2); let single_byte_pages = match page_size_log2 { 16 => false, diff --git a/crates/environ/src/compile/module_environ.rs b/crates/environ/src/compile/module_environ.rs index 2a593b9f2585..c052901800e7 100644 --- a/crates/environ/src/compile/module_environ.rs +++ b/crates/environ/src/compile/module_environ.rs @@ -1,6 +1,6 @@ use crate::module::{ - FuncRefIndex, Initializer, MemoryInitialization, MemoryInitializer, MemoryPlan, Module, - TableSegment, TableSegmentElements, + FuncRefIndex, Initializer, MemoryInitialization, MemoryInitializer, Module, TableSegment, + TableSegmentElements, }; use crate::prelude::*; use crate::{ @@ -373,12 +373,11 @@ impl<'a, 'data> ModuleEnvironment<'a, 'data> { self.validator.memory_section(&memories)?; let cnt = usize::try_from(memories.count()).unwrap(); - self.result.module.memory_plans.reserve_exact(cnt); + self.result.module.memories.reserve_exact(cnt); for entry in memories { let memory = entry?; - let plan = MemoryPlan::for_memory(memory.into(), &self.tunables); - self.result.module.memory_plans.push(plan); + self.result.module.memories.push(memory.into()); } } @@ -767,10 +766,7 @@ and for re-adding support for interface types you can see this issue: func_index }), EntityType::Table(ty) => EntityIndex::Table(self.result.module.tables.push(ty)), - EntityType::Memory(ty) => { - let plan = MemoryPlan::for_memory(ty, &self.tunables); - EntityIndex::Memory(self.result.module.memory_plans.push(plan)) - } + EntityType::Memory(ty) => EntityIndex::Memory(self.result.module.memories.push(ty)), EntityType::Global(ty) => EntityIndex::Global(self.result.module.globals.push(ty)), EntityType::Tag(_) => unimplemented!(), } @@ -920,8 +916,8 @@ impl ModuleTranslation<'_> { // wasm module. 
segments: Vec<(usize, StaticMemoryInitializer)>, } - let mut info = PrimaryMap::with_capacity(self.module.memory_plans.len()); - for _ in 0..self.module.memory_plans.len() { + let mut info = PrimaryMap::with_capacity(self.module.memories.len()); + for _ in 0..self.module.memories.len() { info.push(Memory { data_size: 0, min_addr: u64::MAX, @@ -940,16 +936,11 @@ impl ModuleTranslation<'_> { &mut self, memory_index: MemoryIndex, ) -> Result { - self.module.memory_plans[memory_index] - .memory - .minimum_byte_size() + self.module.memories[memory_index].minimum_byte_size() } fn eval_offset(&mut self, memory_index: MemoryIndex, expr: &ConstExpr) -> Option { - match ( - expr.ops(), - self.module.memory_plans[memory_index].memory.idx_type, - ) { + match (expr.ops(), self.module.memories[memory_index].idx_type) { (&[ConstOp::I32Const(offset)], IndexType::I32) => { Some(offset.unsigned().into()) } @@ -1002,7 +993,7 @@ impl ModuleTranslation<'_> { // initializer can be created. This can be handled technically but // would require some more changes to help fix the assert elsewhere // that this protects against. - if self.module.memory_plans[i].memory.page_size() < page_size { + if self.module.memories[i].page_size() < page_size { return; } diff --git a/crates/environ/src/component/translate/inline.rs b/crates/environ/src/component/translate/inline.rs index b75b0509a326..881cb7440f08 100644 --- a/crates/environ/src/component/translate/inline.rs +++ b/crates/environ/src/component/translate/inline.rs @@ -966,8 +966,8 @@ impl<'a> Inliner<'a> { Some(memory) => match &self.runtime_instances[memory.instance] { InstanceModule::Static(idx) => match &memory.item { ExportItem::Index(i) => { - let plan = &self.nested_modules[*idx].module.memory_plans[*i]; - match plan.memory.idx_type { + let ty = &self.nested_modules[*idx].module.memories[*i]; + match ty.idx_type { IndexType::I32 => false, IndexType::I64 => true, } diff --git a/crates/environ/src/module.rs b/crates/environ/src/module.rs index 52d8f0a3eafd..ec6373f77c6d 100644 --- a/crates/environ/src/module.rs +++ b/crates/environ/src/module.rs @@ -72,37 +72,6 @@ impl MemoryStyle { } } -/// A WebAssembly linear memory description along with our chosen style for -/// implementing it. -#[derive(Debug, Clone, Hash, Serialize, Deserialize)] -pub struct MemoryPlan { - /// The WebAssembly linear memory description. - pub memory: Memory, - /// Our chosen implementation style. - pub style: MemoryStyle, - /// Chosen size of a guard page before the linear memory allocation. - pub pre_guard_size: u64, - /// Our chosen offset-guard size. - pub offset_guard_size: u64, -} - -impl MemoryPlan { - /// Draw up a plan for implementing a `Memory`. - pub fn for_memory(memory: Memory, tunables: &Tunables) -> Self { - let (style, offset_guard_size) = MemoryStyle::for_memory(memory, tunables); - Self { - memory, - style, - offset_guard_size, - pre_guard_size: if tunables.guard_before_linear_memory { - offset_guard_size - } else { - 0 - }, - } - } -} - /// A WebAssembly linear memory initializer. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct MemoryInitializer { @@ -466,7 +435,7 @@ pub struct Module { pub tables: PrimaryMap, /// WebAssembly linear memory plans. - pub memory_plans: PrimaryMap, + pub memories: PrimaryMap, /// WebAssembly global variables. 
pub globals: PrimaryMap, @@ -571,7 +540,7 @@ impl Module { #[inline] pub fn owned_memory_index(&self, memory: DefinedMemoryIndex) -> OwnedMemoryIndex { assert!( - memory.index() < self.memory_plans.len(), + memory.index() < self.memories.len(), "non-shared memory must have an owned index" ); @@ -579,11 +548,11 @@ impl Module { // plans, we can iterate through the plans up to the memory index and // count how many are not shared (i.e., owned). let owned_memory_index = self - .memory_plans + .memories .iter() .skip(self.num_imported_memories) .take(memory.index()) - .filter(|(_, mp)| !mp.memory.shared) + .filter(|(_, mp)| !mp.shared) .count(); OwnedMemoryIndex::new(owned_memory_index) } @@ -634,7 +603,7 @@ impl Module { match index { EntityIndex::Global(i) => EntityType::Global(self.globals[i]), EntityIndex::Table(i) => EntityType::Table(self.tables[i]), - EntityIndex::Memory(i) => EntityType::Memory(self.memory_plans[i].memory), + EntityIndex::Memory(i) => EntityType::Memory(self.memories[i]), EntityIndex::Function(i) => { EntityType::Function(EngineOrModuleTypeIndex::Module(self.functions[i].signature)) } @@ -662,6 +631,12 @@ impl Module { pub fn num_defined_tables(&self) -> usize { self.tables.len() - self.num_imported_tables } + + /// Returns the number of memories defined by this module itself: all + /// memories minus imported memories. + pub fn num_defined_memories(&self) -> usize { + self.memories.len() - self.num_imported_memories + } } /// Type information about functions in a wasm module. diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs index 9b09636c7bb6..017f261d8491 100644 --- a/crates/environ/src/vmoffsets.rs +++ b/crates/environ/src/vmoffsets.rs @@ -338,10 +338,10 @@ impl VMOffsets
     /// Return a new `VMOffsets` instance, for a given pointer size.
     pub fn new(ptr: P, module: &Module) -> Self {
         let num_owned_memories = module
-            .memory_plans
+            .memories
             .iter()
             .skip(module.num_imported_memories)
-            .filter(|p| !p.1.memory.shared)
+            .filter(|p| !p.1.shared)
             .count()
             .try_into()
             .unwrap();
@@ -352,9 +352,7 @@ impl<P: PtrSize> VMOffsets<P>
{ num_imported_memories: cast_to_u32(module.num_imported_memories), num_imported_globals: cast_to_u32(module.num_imported_globals), num_defined_tables: cast_to_u32(module.num_defined_tables()), - num_defined_memories: cast_to_u32( - module.memory_plans.len() - module.num_imported_memories, - ), + num_defined_memories: cast_to_u32(module.num_defined_memories()), num_owned_memories, num_defined_globals: cast_to_u32(module.globals.len() - module.num_imported_globals), num_escaped_funcs: cast_to_u32(module.num_escaped_funcs), diff --git a/crates/wasmtime/src/runtime/externals.rs b/crates/wasmtime/src/runtime/externals.rs index d457a13a568b..67e9c04f588f 100644 --- a/crates/wasmtime/src/runtime/externals.rs +++ b/crates/wasmtime/src/runtime/externals.rs @@ -112,7 +112,7 @@ impl Extern { Extern::Func(Func::from_wasmtime_function(f, store)) } crate::runtime::vm::Export::Memory(m) => { - if m.memory.memory.shared { + if m.memory.shared { Extern::SharedMemory(SharedMemory::from_wasmtime_memory(m, store)) } else { Extern::Memory(Memory::from_wasmtime_memory(m, store)) diff --git a/crates/wasmtime/src/runtime/memory.rs b/crates/wasmtime/src/runtime/memory.rs index 777a9dd1c850..45a51132482f 100644 --- a/crates/wasmtime/src/runtime/memory.rs +++ b/crates/wasmtime/src/runtime/memory.rs @@ -9,7 +9,6 @@ use core::fmt; use core::ops::Range; use core::slice; use core::time::Duration; -use wasmtime_environ::MemoryPlan; pub use crate::runtime::vm::WaitResult; @@ -294,7 +293,7 @@ impl Memory { /// ``` pub fn ty(&self, store: impl AsContext) -> MemoryType { let store = store.as_context(); - let ty = &store[self.0].memory.memory; + let ty = &store[self.0].memory; MemoryType::from_wasmtime_memory(&ty) } @@ -499,7 +498,7 @@ impl Memory { } pub(crate) fn _page_size(&self, store: &StoreOpaque) -> u64 { - store[self.0].memory.memory.page_size() + store[self.0].memory.page_size() } /// Returns the log2 of this memory's page size, in bytes. @@ -519,7 +518,7 @@ impl Memory { } pub(crate) fn _page_size_log2(&self, store: &StoreOpaque) -> u8 { - store[self.0].memory.memory.page_size_log2 + store[self.0].memory.page_size_log2 } /// Grows this WebAssembly memory by `delta` pages. @@ -632,7 +631,7 @@ impl Memory { } pub(crate) fn wasmtime_ty<'a>(&self, store: &'a StoreData) -> &'a wasmtime_environ::Memory { - &store[self.0].memory.memory + &store[self.0].memory } pub(crate) fn vmimport(&self, store: &StoreOpaque) -> crate::runtime::vm::VMMemoryImport { @@ -808,9 +807,9 @@ impl SharedMemory { debug_assert!(ty.maximum().is_some()); let tunables = engine.tunables(); - let plan = MemoryPlan::for_memory(*ty.wasmtime_memory(), tunables); - let page_size_log2 = plan.memory.page_size_log2; - let memory = crate::runtime::vm::SharedMemory::new(plan)?; + let ty = ty.wasmtime_memory(); + let page_size_log2 = ty.page_size_log2; + let memory = crate::runtime::vm::SharedMemory::new(ty, tunables)?; Ok(Self { vm: memory, @@ -1055,8 +1054,13 @@ mod tests { let ty = MemoryType::new(1, None); let mem = Memory::new(&mut store, ty).unwrap(); let store = store.as_context(); - assert_eq!(store[mem.0].memory.offset_guard_size, 0); - match &store[mem.0].memory.style { + let (style, offset_guard_size) = wasmtime_environ::MemoryStyle::for_memory( + store[mem.0].memory, + store.engine().tunables(), + ); + + assert_eq!(offset_guard_size, 0); + match style { wasmtime_environ::MemoryStyle::Dynamic { .. 
} => {} other => panic!("unexpected style {other:?}"), } diff --git a/crates/wasmtime/src/runtime/module.rs b/crates/wasmtime/src/runtime/module.rs index 53a520715229..0d25c02fd774 100644 --- a/crates/wasmtime/src/runtime/module.rs +++ b/crates/wasmtime/src/runtime/module.rs @@ -876,12 +876,12 @@ impl Module { /// ``` pub fn resources_required(&self) -> ResourcesRequired { let em = self.env_module(); - let num_memories = u32::try_from(em.memory_plans.len() - em.num_imported_memories).unwrap(); + let num_memories = u32::try_from(em.num_defined_memories()).unwrap(); let max_initial_memory_size = em - .memory_plans + .memories .values() .skip(em.num_imported_memories) - .map(|plan| plan.memory.limits.min) + .map(|memory| memory.limits.min) .max(); let num_tables = u32::try_from(em.num_defined_tables()).unwrap(); let max_initial_table_size = em diff --git a/crates/wasmtime/src/runtime/store.rs b/crates/wasmtime/src/runtime/store.rs index 1c98af855367..752524fe489b 100644 --- a/crates/wasmtime/src/runtime/store.rs +++ b/crates/wasmtime/src/runtime/store.rs @@ -1293,7 +1293,7 @@ impl StoreOpaque { } let module = module.env_module(); - let memories = module.memory_plans.len() - module.num_imported_memories; + let memories = module.num_defined_memories(); let tables = module.num_defined_tables(); bump(&mut self.instance_count, self.instance_limit, 1, "instance")?; diff --git a/crates/wasmtime/src/runtime/trampoline/memory.rs b/crates/wasmtime/src/runtime/trampoline/memory.rs index 5cd53bf791e9..58210ffc27b7 100644 --- a/crates/wasmtime/src/runtime/trampoline/memory.rs +++ b/crates/wasmtime/src/runtime/trampoline/memory.rs @@ -12,8 +12,8 @@ use crate::MemoryType; use alloc::sync::Arc; use core::ops::Range; use wasmtime_environ::{ - DefinedMemoryIndex, DefinedTableIndex, EntityIndex, HostPtr, MemoryPlan, MemoryStyle, Module, - Tunables, VMOffsets, + DefinedMemoryIndex, DefinedTableIndex, EntityIndex, HostPtr, MemoryStyle, Module, Tunables, + VMOffsets, }; #[cfg(feature = "component-model")] @@ -34,14 +34,10 @@ pub fn create_memory( ) -> Result { let mut module = Module::new(); - // Create a memory plan for the memory, though it will never be used for - // constructing a memory with an allocator: instead the memories are either - // preallocated (i.e., shared memory) or allocated manually below. - let plan = wasmtime_environ::MemoryPlan::for_memory( - *memory_ty.wasmtime_memory(), - store.engine().tunables(), - ); - let memory_id = module.memory_plans.push(plan.clone()); + // Create a memory, though it will never be used for constructing a memory + // with an allocator: instead the memories are either preallocated (i.e., + // shared memory) or allocated manually below. + let memory_id = module.memories.push(*memory_ty.wasmtime_memory()); // Since we have only associated a single memory with the "frankenstein" // instance, it will be exported at index 0. 
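
The same re-derivation appears in the `MemoryCreatorProxy` hunk that
follows: `RuntimeMemoryCreator::new_memory` now takes the raw memory
type plus `Tunables` instead of a `MemoryPlan` and recovers the
reservation size itself. A sketch of just that computation (the
`reserved_size` helper is illustrative only, not code from the patch):

    use wasmtime_environ::{Memory, MemoryStyle, Tunables};

    // Static memories reserve their whole addressable range up front, so
    // the embedder's memory creator is told the exact reservation size;
    // dynamic memories have no fixed reservation.
    fn reserved_size(ty: &Memory, tunables: &Tunables) -> Option<usize> {
        let (style, _offset_guard_size) = MemoryStyle::for_memory(*ty, tunables);
        match style {
            MemoryStyle::Static { byte_reservation } => {
                Some(usize::try_from(byte_reservation).unwrap())
            }
            MemoryStyle::Dynamic { .. } => None,
        }
    }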
@@ -126,13 +122,14 @@ pub(crate) struct MemoryCreatorProxy(pub Arc); impl RuntimeMemoryCreator for MemoryCreatorProxy { fn new_memory( &self, - plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, minimum: usize, maximum: Option, _: Option<&Arc>, ) -> Result> { - let ty = MemoryType::from_wasmtime_memory(&plan.memory); - let reserved_size_in_bytes = match plan.style { + let (style, offset_guard_size) = MemoryStyle::for_memory(*ty, tunables); + let reserved_size_in_bytes = match style { MemoryStyle::Static { byte_reservation } => { Some(usize::try_from(byte_reservation).unwrap()) } @@ -140,16 +137,16 @@ impl RuntimeMemoryCreator for MemoryCreatorProxy { }; self.0 .new_memory( - ty, + MemoryType::from_wasmtime_memory(ty), minimum, maximum, reserved_size_in_bytes, - usize::try_from(plan.offset_guard_size).unwrap(), + usize::try_from(offset_guard_size).unwrap(), ) .map(|mem| { Box::new(LinearMemoryProxy { mem, - page_size_log2: plan.memory.page_size_log2, + page_size_log2: ty.page_size_log2, }) as Box }) .map_err(|e| anyhow!(e)) @@ -174,7 +171,7 @@ unsafe impl InstanceAllocatorImpl for SingleMemoryInstance<'_> { fn validate_module_impl(&self, module: &Module, offsets: &VMOffsets) -> Result<()> { anyhow::ensure!( - module.memory_plans.len() == 1, + module.memories.len() == 1, "`SingleMemoryInstance` allocator can only be used for modules with a single memory" ); self.ondemand.validate_module_impl(module, offsets)?; @@ -200,7 +197,8 @@ unsafe impl InstanceAllocatorImpl for SingleMemoryInstance<'_> { unsafe fn allocate_memory( &self, request: &mut InstanceAllocationRequest, - memory_plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, memory_index: DefinedMemoryIndex, ) -> Result<(MemoryAllocationIndex, Memory)> { #[cfg(debug_assertions)] @@ -218,7 +216,7 @@ unsafe impl InstanceAllocatorImpl for SingleMemoryInstance<'_> { )), None => self .ondemand - .allocate_memory(request, memory_plan, memory_index), + .allocate_memory(request, ty, tunables, memory_index), } } diff --git a/crates/wasmtime/src/runtime/vm/cow.rs b/crates/wasmtime/src/runtime/vm/cow.rs index 392ff6c1fb58..29c744e3cf25 100644 --- a/crates/wasmtime/src/runtime/vm/cow.rs +++ b/crates/wasmtime/src/runtime/vm/cow.rs @@ -5,6 +5,7 @@ // warnings #![cfg_attr(any(not(unix), miri), allow(unreachable_patterns))] +use super::sys::DecommitBehavior; use crate::prelude::*; use crate::runtime::vm::sys::vm::{self, MemoryImageSource}; use crate::runtime::vm::{MmapVec, SendSyncPtr}; @@ -13,11 +14,9 @@ use core::ffi::c_void; use core::ops::Range; use core::ptr::{self, NonNull}; use wasmtime_environ::{ - DefinedMemoryIndex, MemoryInitialization, MemoryPlan, MemoryStyle, Module, PrimaryMap, + DefinedMemoryIndex, MemoryInitialization, MemoryStyle, Module, PrimaryMap, Tunables, }; -use super::sys::DecommitBehavior; - /// Backing images for memories in a module. /// /// This is meant to be built once, when a module is first loaded/constructed, @@ -196,8 +195,7 @@ impl ModuleMemoryImages { // creation files then we fail creating `ModuleMemoryImages` since this // memory couldn't be represented. 
let data = &wasm_data[init.data.start as usize..init.data.end as usize]; - if module.memory_plans[memory_index] - .memory + if module.memories[memory_index] .minimum_byte_size() .map_or(false, |mem_initial_len| { init.offset + u64::try_from(data.len()).unwrap() > mem_initial_len @@ -413,7 +411,8 @@ impl MemoryImageSlot { &mut self, initial_size_bytes: usize, maybe_image: Option<&Arc>, - plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, ) -> Result<()> { assert!(!self.dirty); assert!(initial_size_bytes <= self.static_size); @@ -443,8 +442,9 @@ impl MemoryImageSlot { // and/or is static), then we need to reset memory protections. Put // another way, the only time it is safe to not reset protections is // when we are using dynamic memory without any guard pages. + let (style, offset_guard_size) = MemoryStyle::for_memory(*ty, tunables); if initial_size_bytes < self.accessible - && (plan.offset_guard_size > 0 || matches!(plan.style, MemoryStyle::Static { .. })) + && (offset_guard_size > 0 || matches!(style, MemoryStyle::Static { .. })) { self.set_protection(initial_size_bytes..self.accessible, false)?; self.accessible = initial_size_bytes; @@ -774,25 +774,22 @@ mod test { }) } - fn dummy_memory_plan(style: MemoryStyle) -> MemoryPlan { - MemoryPlan { - style, - memory: Memory { - idx_type: IndexType::I32, - limits: Limits { min: 0, max: None }, - shared: false, - page_size_log2: Memory::DEFAULT_PAGE_SIZE_LOG2, - }, - pre_guard_size: 0, - offset_guard_size: 0, + fn dummy_memory() -> Memory { + Memory { + idx_type: IndexType::I32, + limits: Limits { min: 0, max: None }, + shared: false, + page_size_log2: Memory::DEFAULT_PAGE_SIZE_LOG2, } } #[test] fn instantiate_no_image() { - let plan = dummy_memory_plan(MemoryStyle::Static { - byte_reservation: 4 << 30, - }); + let ty = dummy_memory(); + let tunables = Tunables { + static_memory_reservation: 4 << 30, + ..Tunables::default_miri() + }; // 4 MiB mmap'd area, not accessible let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap(); // Create a MemoryImageSlot on top of it @@ -800,7 +797,7 @@ mod test { memfd.no_clear_on_drop(); assert!(!memfd.is_dirty()); // instantiate with 64 KiB initial size - memfd.instantiate(64 << 10, None, &plan).unwrap(); + memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap(); assert!(memfd.is_dirty()); // We should be able to access this 64 KiB (try both ends) and // it should consist of zeroes. @@ -820,7 +817,7 @@ mod test { .clear_and_remain_ready(0, |ptr, len| unsafe { decommit_pages(ptr, len).unwrap() }) .unwrap(); assert!(!memfd.is_dirty()); - memfd.instantiate(64 << 10, None, &plan).unwrap(); + memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap(); let slice = unsafe { mmap.slice(0..65536) }; assert_eq!(0, slice[1024]); } @@ -828,9 +825,11 @@ mod test { #[test] fn instantiate_image() { let page_size = host_page_size(); - let plan = dummy_memory_plan(MemoryStyle::Static { - byte_reservation: 4 << 30, - }); + let ty = dummy_memory(); + let tunables = Tunables { + static_memory_reservation: 4 << 30, + ..Tunables::default_miri() + }; // 4 MiB mmap'd area, not accessible let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap(); // Create a MemoryImageSlot on top of it @@ -839,7 +838,9 @@ mod test { // Create an image with some data. 
let image = Arc::new(create_memfd_with_data(page_size, &[1, 2, 3, 4]).unwrap()); // Instantiate with this image - memfd.instantiate(64 << 10, Some(&image), &plan).unwrap(); + memfd + .instantiate(64 << 10, Some(&image), &ty, &tunables) + .unwrap(); assert!(memfd.has_image()); let slice = unsafe { mmap.slice_mut(0..65536) }; assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]); @@ -848,7 +849,9 @@ mod test { memfd .clear_and_remain_ready(0, |ptr, len| unsafe { decommit_pages(ptr, len).unwrap() }) .unwrap(); - memfd.instantiate(64 << 10, Some(&image), &plan).unwrap(); + memfd + .instantiate(64 << 10, Some(&image), &ty, &tunables) + .unwrap(); let slice = unsafe { mmap.slice_mut(0..65536) }; // Should not see mutation from above assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]); @@ -856,7 +859,7 @@ mod test { memfd .clear_and_remain_ready(0, |ptr, len| unsafe { decommit_pages(ptr, len).unwrap() }) .unwrap(); - memfd.instantiate(64 << 10, None, &plan).unwrap(); + memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap(); assert!(!memfd.has_image()); let slice = unsafe { mmap.slice_mut(0..65536) }; assert_eq!(&[0, 0, 0, 0], &slice[page_size..][..4]); @@ -864,7 +867,9 @@ mod test { memfd .clear_and_remain_ready(0, |ptr, len| unsafe { decommit_pages(ptr, len).unwrap() }) .unwrap(); - memfd.instantiate(64 << 10, Some(&image), &plan).unwrap(); + memfd + .instantiate(64 << 10, Some(&image), &ty, &tunables) + .unwrap(); let slice = unsafe { mmap.slice_mut(0..65536) }; assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]); // Create another image with different data. @@ -872,7 +877,9 @@ mod test { memfd .clear_and_remain_ready(0, |ptr, len| unsafe { decommit_pages(ptr, len).unwrap() }) .unwrap(); - memfd.instantiate(128 << 10, Some(&image2), &plan).unwrap(); + memfd + .instantiate(128 << 10, Some(&image2), &ty, &tunables) + .unwrap(); let slice = unsafe { mmap.slice_mut(0..65536) }; assert_eq!(&[10, 11, 12, 13], &slice[page_size..][..4]); // Instantiate the original image again; we should notice it's @@ -880,7 +887,9 @@ mod test { memfd .clear_and_remain_ready(0, |ptr, len| unsafe { decommit_pages(ptr, len).unwrap() }) .unwrap(); - memfd.instantiate(64 << 10, Some(&image), &plan).unwrap(); + memfd + .instantiate(64 << 10, Some(&image), &ty, &tunables) + .unwrap(); let slice = unsafe { mmap.slice_mut(0..65536) }; assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]); } @@ -889,9 +898,11 @@ mod test { #[cfg(target_os = "linux")] fn memset_instead_of_madvise() { let page_size = host_page_size(); - let plan = dummy_memory_plan(MemoryStyle::Static { - byte_reservation: 100 << 16, - }); + let ty = dummy_memory(); + let tunables = Tunables { + static_memory_reservation: 100 << 16, + ..Tunables::default_miri() + }; let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap(); let mut memfd = MemoryImageSlot::create(mmap.as_mut_ptr() as *mut _, 0, 4 << 20); memfd.no_clear_on_drop(); @@ -900,7 +911,9 @@ mod test { for image_off in [0, page_size, page_size * 2] { let image = Arc::new(create_memfd_with_data(image_off, &[1, 2, 3, 4]).unwrap()); for amt_to_memset in [0, page_size, page_size * 10, 1 << 20, 10 << 20] { - memfd.instantiate(64 << 10, Some(&image), &plan).unwrap(); + memfd + .instantiate(64 << 10, Some(&image), &ty, &tunables) + .unwrap(); assert!(memfd.has_image()); let slice = unsafe { mmap.slice_mut(0..64 << 10) }; if image_off > 0 { @@ -920,7 +933,7 @@ mod test { // Test without an image for amt_to_memset in [0, page_size, page_size * 10, 1 << 20, 10 << 20] { - memfd.instantiate(64 << 10, None, 
&plan).unwrap(); + memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap(); let mem = unsafe { mmap.slice_mut(0..64 << 10) }; for chunk in mem.chunks_mut(1024) { assert_eq!(chunk[0], 0); @@ -938,7 +951,12 @@ mod test { #[cfg(target_os = "linux")] fn dynamic() { let page_size = host_page_size(); - let plan = dummy_memory_plan(MemoryStyle::Dynamic { reserve: 200 }); + let ty = dummy_memory(); + let tunables = Tunables { + static_memory_reservation: 0, + dynamic_memory_growth_reserve: 200, + ..Tunables::default_miri() + }; let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap(); let mut memfd = MemoryImageSlot::create(mmap.as_mut_ptr() as *mut _, 0, 4 << 20); @@ -948,7 +966,9 @@ mod test { // Instantiate the image and test that memory remains accessible after // it's cleared. - memfd.instantiate(initial, Some(&image), &plan).unwrap(); + memfd + .instantiate(initial, Some(&image), &ty, &tunables) + .unwrap(); assert!(memfd.has_image()); let slice = unsafe { mmap.slice_mut(0..(64 << 10) + page_size) }; assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]); @@ -961,7 +981,9 @@ mod test { // Re-instantiate make sure it preserves memory. Grow a bit and set data // beyond the initial size. - memfd.instantiate(initial, Some(&image), &plan).unwrap(); + memfd + .instantiate(initial, Some(&image), &ty, &tunables) + .unwrap(); assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]); memfd.set_heap_limit(initial * 2).unwrap(); assert_eq!(&[0, 0], &slice[initial..initial + 2]); @@ -976,7 +998,9 @@ mod test { // Instantiate again, and again memory beyond the initial size should // still be accessible. Grow into it again and make sure it works. - memfd.instantiate(initial, Some(&image), &plan).unwrap(); + memfd + .instantiate(initial, Some(&image), &ty, &tunables) + .unwrap(); assert_eq!(&[0, 0], &slice[initial..initial + 2]); memfd.set_heap_limit(initial * 2).unwrap(); assert_eq!(&[0, 0], &slice[initial..initial + 2]); @@ -987,7 +1011,7 @@ mod test { .unwrap(); // Reset the image to none and double-check everything is back to zero - memfd.instantiate(64 << 10, None, &plan).unwrap(); + memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap(); assert!(!memfd.has_image()); assert_eq!(&[0, 0, 0, 0], &slice[page_size..][..4]); assert_eq!(&[0, 0], &slice[initial..initial + 2]); diff --git a/crates/wasmtime/src/runtime/vm/debug_builtins.rs b/crates/wasmtime/src/runtime/vm/debug_builtins.rs index 8d460f724a72..9a8261fdbce0 100644 --- a/crates/wasmtime/src/runtime/vm/debug_builtins.rs +++ b/crates/wasmtime/src/runtime/vm/debug_builtins.rs @@ -11,7 +11,7 @@ static mut VMCTX_AND_MEMORY: (*mut VMContext, usize) = (std::ptr::null_mut(), 0) pub unsafe extern "C" fn resolve_vmctx_memory(ptr: usize) -> *const u8 { Instance::from_vmctx(VMCTX_AND_MEMORY.0, |handle| { assert!( - VMCTX_AND_MEMORY.1 < handle.env_module().memory_plans.len(), + VMCTX_AND_MEMORY.1 < handle.env_module().memories.len(), "memory index for debugger is out of bounds" ); let index = MemoryIndex::new(VMCTX_AND_MEMORY.1); @@ -29,7 +29,7 @@ pub unsafe extern "C" fn resolve_vmctx_memory_ptr(p: *const u32) -> *const u8 { ); Instance::from_vmctx(VMCTX_AND_MEMORY.0, |handle| { assert!( - VMCTX_AND_MEMORY.1 < handle.env_module().memory_plans.len(), + VMCTX_AND_MEMORY.1 < handle.env_module().memories.len(), "memory index for debugger is out of bounds" ); let index = MemoryIndex::new(VMCTX_AND_MEMORY.1); diff --git a/crates/wasmtime/src/runtime/vm/export.rs b/crates/wasmtime/src/runtime/vm/export.rs index 8bc8f016d7c7..526f714d059e 100644 --- 
a/crates/wasmtime/src/runtime/vm/export.rs +++ b/crates/wasmtime/src/runtime/vm/export.rs @@ -2,7 +2,7 @@ use crate::runtime::vm::vmcontext::{ VMContext, VMFuncRef, VMGlobalDefinition, VMMemoryDefinition, VMTableDefinition, }; use core::ptr::NonNull; -use wasmtime_environ::{DefinedMemoryIndex, Global, MemoryPlan, Table}; +use wasmtime_environ::{DefinedMemoryIndex, Global, Memory, Table}; /// The value of an export passed from one instance to another. pub enum Export { @@ -70,7 +70,7 @@ pub struct ExportMemory { /// Pointer to the containing `VMContext`. pub vmctx: *mut VMContext, /// The memory declaration, used for compatibility checking. - pub memory: MemoryPlan, + pub memory: Memory, /// The index at which the memory is defined within the `vmctx`. pub index: DefinedMemoryIndex, } diff --git a/crates/wasmtime/src/runtime/vm/instance.rs b/crates/wasmtime/src/runtime/vm/instance.rs index 38849a817089..7e399818a5e8 100644 --- a/crates/wasmtime/src/runtime/vm/instance.rs +++ b/crates/wasmtime/src/runtime/vm/instance.rs @@ -27,9 +27,9 @@ use sptr::Strict; use wasmtime_environ::{ packed_option::ReservedValue, DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex, GlobalIndex, - HostPtr, MemoryIndex, MemoryPlan, Module, ModuleInternedTypeIndex, PrimaryMap, PtrSize, - TableIndex, TableInitialValue, TableSegmentElements, Trap, VMOffsets, VMSharedTypeIndex, - WasmHeapTopType, VMCONTEXT_MAGIC, + HostPtr, MemoryIndex, Module, ModuleInternedTypeIndex, PrimaryMap, PtrSize, TableIndex, + TableInitialValue, TableSegmentElements, Trap, VMOffsets, VMSharedTypeIndex, WasmHeapTopType, + VMCONTEXT_MAGIC, }; #[cfg(feature = "wmemcheck")] use wasmtime_wmemcheck::Wmemcheck; @@ -162,7 +162,7 @@ impl Instance { req: InstanceAllocationRequest, memories: PrimaryMap, tables: PrimaryMap, - memory_plans: &PrimaryMap, + memory_tys: &PrimaryMap, ) -> InstanceHandle { // The allocation must be *at least* the size required of `Instance`. let layout = Self::alloc_layout(req.runtime_info.offsets()); @@ -177,7 +177,7 @@ impl Instance { let dropped_data = EntitySet::with_capacity(module.passive_data_map.len()); #[cfg(not(feature = "wmemcheck"))] - let _ = memory_plans; + let _ = memory_tys; ptr::write( ptr, @@ -195,10 +195,10 @@ impl Instance { #[cfg(feature = "wmemcheck")] wmemcheck_state: { if req.wmemcheck { - let size = memory_plans + let size = memory_tys .iter() .next() - .map(|plan| plan.1.memory.limits.min) + .map(|memory| memory.1.limits.min) .unwrap_or(0) * 64 * 1024; @@ -587,7 +587,7 @@ impl Instance { ExportMemory { definition, vmctx, - memory: self.env_module().memory_plans[index].clone(), + memory: self.env_module().memories[index], index: def_index, } } @@ -634,7 +634,7 @@ impl Instance { /// Get the given memory's page size, in bytes. pub(crate) fn memory_page_size(&self, index: MemoryIndex) -> usize { - usize::try_from(self.env_module().memory_plans[index].memory.page_size()).unwrap() + usize::try_from(self.env_module().memories[index].page_size()).unwrap() } /// Grow memory by the specified amount of pages. @@ -1295,10 +1295,10 @@ impl Instance { // definitions of memories owned (not shared) in the module. 
let mut ptr = self.vmctx_plus_offset_mut(offsets.vmctx_memories_begin()); let mut owned_ptr = self.vmctx_plus_offset_mut(offsets.vmctx_owned_memories_begin()); - for i in 0..module.memory_plans.len() - module.num_imported_memories { + for i in 0..module.num_defined_memories() { let defined_memory_index = DefinedMemoryIndex::new(i); let memory_index = module.memory_index(defined_memory_index); - if module.memory_plans[memory_index].memory.shared { + if module.memories[memory_index].shared { let def_ptr = self.memories[defined_memory_index] .1 .as_shared_memory() @@ -1458,7 +1458,7 @@ impl InstanceHandle { pub fn all_memories<'a>( &'a mut self, ) -> impl ExactSizeIterator + 'a { - let indices = (0..self.module().memory_plans.len()) + let indices = (0..self.module().memories.len()) .map(|i| MemoryIndex::new(i)) .collect::>(); indices diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator.rs b/crates/wasmtime/src/runtime/vm/instance/allocator.rs index 8a0434ec20f0..4d7e1f93c087 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator.rs @@ -11,8 +11,8 @@ use crate::vm::VMGlobalDefinition; use core::{any::Any, mem, ptr}; use wasmtime_environ::{ DefinedMemoryIndex, DefinedTableIndex, HostPtr, InitMemory, MemoryInitialization, - MemoryInitializer, MemoryPlan, Module, PrimaryMap, SizeOverflow, TableInitialValue, Trap, - Tunables, VMOffsets, WasmHeapTopType, + MemoryInitializer, Module, PrimaryMap, SizeOverflow, TableInitialValue, Trap, Tunables, + VMOffsets, WasmHeapTopType, }; #[cfg(feature = "gc")] @@ -239,7 +239,8 @@ pub unsafe trait InstanceAllocatorImpl { unsafe fn allocate_memory( &self, request: &mut InstanceAllocationRequest, - memory_plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, memory_index: DefinedMemoryIndex, ) -> Result<(MemoryAllocationIndex, Memory)>; @@ -386,7 +387,7 @@ pub trait InstanceAllocator: InstanceAllocatorImpl { self.increment_core_instance_count()?; - let num_defined_memories = module.memory_plans.len() - module.num_imported_memories; + let num_defined_memories = module.num_defined_memories(); let mut memories = PrimaryMap::with_capacity(num_defined_memories); let num_defined_tables = module.num_defined_tables(); @@ -397,12 +398,7 @@ pub trait InstanceAllocator: InstanceAllocatorImpl { self.allocate_tables(&mut request, &mut tables)?; Ok(()) })() { - Ok(_) => Ok(Instance::new( - request, - memories, - tables, - &module.memory_plans, - )), + Ok(_) => Ok(Instance::new(request, memories, tables, &module.memories)), Err(e) => { self.deallocate_memories(&mut memories); self.deallocate_tables(&mut tables); @@ -450,16 +446,12 @@ pub trait InstanceAllocator: InstanceAllocatorImpl { InstanceAllocatorImpl::validate_module_impl(self, module, request.runtime_info.offsets()) .expect("module should have already been validated before allocation"); - for (memory_index, memory_plan) in module - .memory_plans - .iter() - .skip(module.num_imported_memories) - { + for (memory_index, ty) in module.memories.iter().skip(module.num_imported_memories) { let memory_index = module .defined_memory_index(memory_index) .expect("should be a defined memory since we skipped imported ones"); - memories.push(self.allocate_memory(request, memory_plan, memory_index)?); + memories.push(self.allocate_memory(request, ty, request.tunables, memory_index)?); } Ok(()) @@ -639,10 +631,7 @@ fn get_memory_init_start(init: &MemoryInitializer, instance: &mut Instance) -> R let mut context = 
ConstEvalContext::new(instance); let mut const_evaluator = ConstExprEvaluator::default(); unsafe { const_evaluator.eval(&mut context, &init.offset) }.map(|v| { - match instance.env_module().memory_plans[init.memory_index] - .memory - .idx_type - { + match instance.env_module().memories[init.memory_index].idx_type { wasmtime_environ::IndexType::I32 => v.get_u32().into(), wasmtime_environ::IndexType::I64 => v.get_u64(), } @@ -711,10 +700,7 @@ fn initialize_memories( let val = unsafe { self.const_evaluator.eval(self.context, expr) } .expect("const expression should be valid"); Some( - match self.context.instance.env_module().memory_plans[memory] - .memory - .idx_type - { + match self.context.instance.env_module().memories[memory].idx_type { wasmtime_environ::IndexType::I32 => val.get_u32().into(), wasmtime_environ::IndexType::I64 => val.get_u64(), }, diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/on_demand.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/on_demand.rs index 59a5030b2c1b..c5adb7b9e724 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/on_demand.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/on_demand.rs @@ -9,7 +9,7 @@ use crate::runtime::vm::table::Table; use crate::runtime::vm::CompiledModuleId; use alloc::sync::Arc; use wasmtime_environ::{ - DefinedMemoryIndex, DefinedTableIndex, HostPtr, MemoryPlan, Module, Tunables, VMOffsets, + DefinedMemoryIndex, DefinedTableIndex, HostPtr, Module, Tunables, VMOffsets, }; #[cfg(feature = "gc")] @@ -96,7 +96,8 @@ unsafe impl InstanceAllocatorImpl for OnDemandInstanceAllocator { unsafe fn allocate_memory( &self, request: &mut InstanceAllocationRequest, - memory_plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, memory_index: DefinedMemoryIndex, ) -> Result<(MemoryAllocationIndex, Memory)> { let creator = self @@ -106,7 +107,8 @@ unsafe impl InstanceAllocatorImpl for OnDemandInstanceAllocator { let image = request.runtime_info.memory_image(memory_index)?; let allocation_index = MemoryAllocationIndex::default(); let memory = Memory::new_dynamic( - memory_plan, + ty, + tunables, creator, request .store diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling.rs index 2c865a1861be..dbad7bbd9fb7 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling.rs @@ -60,7 +60,7 @@ use std::{ sync::atomic::{AtomicU64, Ordering}, }; use wasmtime_environ::{ - DefinedMemoryIndex, DefinedTableIndex, HostPtr, MemoryPlan, Module, Tunables, VMOffsets, + DefinedMemoryIndex, DefinedTableIndex, HostPtr, Module, Tunables, VMOffsets, }; #[cfg(feature = "gc")] @@ -513,7 +513,7 @@ unsafe impl InstanceAllocatorImpl for PoolingInstanceAllocator { let offsets = VMOffsets::new(HostPtr, &module); self.validate_module_impl(module, &offsets)?; num_core_instances += 1; - num_memories += module.memory_plans.len() - module.num_imported_memories; + num_memories += module.num_defined_memories(); num_tables += module.num_defined_tables(); } LowerImport { .. 
} @@ -597,10 +597,11 @@ unsafe impl InstanceAllocatorImpl for PoolingInstanceAllocator { unsafe fn allocate_memory( &self, request: &mut InstanceAllocationRequest, - memory_plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, memory_index: DefinedMemoryIndex, ) -> Result<(MemoryAllocationIndex, Memory)> { - self.with_flush_and_retry(|| self.memories.allocate(request, memory_plan, memory_index)) + self.with_flush_and_retry(|| self.memories.allocate(request, ty, tunables, memory_index)) } unsafe fn deallocate_memory( diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs index 33cc098dc85d..5535d0153fd2 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs @@ -63,7 +63,7 @@ use crate::{prelude::*, vm::round_usize_up_to_host_pages}; use std::ffi::c_void; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; -use wasmtime_environ::{DefinedMemoryIndex, MemoryPlan, MemoryStyle, Module, Tunables}; +use wasmtime_environ::{DefinedMemoryIndex, MemoryStyle, Module, Tunables}; /// A set of allocator slots. /// @@ -257,7 +257,7 @@ impl MemoryPool { /// Validate whether this memory pool supports the given module. pub fn validate(&self, module: &Module) -> Result<()> { - let memories = module.memory_plans.len() - module.num_imported_memories; + let memories = module.num_defined_memories(); if memories > usize::try_from(self.memories_per_instance).unwrap() { bail!( "defined memories count of {} exceeds the per-instance limit of {}", @@ -266,25 +266,8 @@ impl MemoryPool { ); } - for (i, plan) in module - .memory_plans - .iter() - .skip(module.num_imported_memories) - { - match plan.style { - MemoryStyle::Static { byte_reservation } => { - if u64::try_from(self.layout.bytes_to_next_stripe_slot()).unwrap() - < byte_reservation - { - bail!( - "memory size allocated per-memory is too small to \ - satisfy static bound of {byte_reservation:#x} bytes" - ); - } - } - MemoryStyle::Dynamic { .. } => {} - } - let min = plan.memory.minimum_byte_size().with_context(|| { + for (i, memory) in module.memories.iter().skip(module.num_imported_memories) { + let min = memory.minimum_byte_size().with_context(|| { format!( "memory index {} has a minimum byte size that cannot be represented in a u64", i.as_u32() @@ -312,7 +295,8 @@ impl MemoryPool { pub fn allocate( &self, request: &mut InstanceAllocationRequest, - memory_plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, memory_index: DefinedMemoryIndex, ) -> Result<(MemoryAllocationIndex, Memory)> { let stripe_index = if let Some(pkey) = &request.pkey { @@ -345,7 +329,8 @@ impl MemoryPool { // satisfied by the configuration of this pooling allocator. This // should be returned as an error through `validate_memory_plans` // but double-check here to be sure. 
- match memory_plan.style { + let (style, _) = MemoryStyle::for_memory(*ty, tunables); + match style { MemoryStyle::Static { byte_reservation } => { assert!( byte_reservation @@ -360,8 +345,7 @@ impl MemoryPool { let mut slot = self.take_memory_image_slot(allocation_index); let image = request.runtime_info.memory_image(memory_index)?; - let initial_size = memory_plan - .memory + let initial_size = ty .minimum_byte_size() .expect("min size checked in validation"); @@ -379,10 +363,10 @@ impl MemoryPool { // mmap that would leave an open space for someone // else to come in and map something. let initial_size = usize::try_from(initial_size).unwrap(); - slot.instantiate(initial_size, image, memory_plan)?; + slot.instantiate(initial_size, image, ty, tunables)?; Memory::new_static( - memory_plan, + ty, base_ptr, base_capacity, slot, diff --git a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs index 5b3bfe41e28d..d82ec0cbac5a 100644 --- a/crates/wasmtime/src/runtime/vm/memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory.rs @@ -13,14 +13,15 @@ use alloc::sync::Arc; use core::ops::Range; use core::ptr::NonNull; use core::time::Duration; -use wasmtime_environ::{MemoryPlan, MemoryStyle, Trap}; +use wasmtime_environ::{MemoryStyle, Trap, Tunables}; /// A memory allocator pub trait RuntimeMemoryCreator: Send + Sync { /// Create new RuntimeLinearMemory fn new_memory( &self, - plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, minimum: usize, maximum: Option, // Optionally, a memory image for CoW backing. @@ -35,13 +36,15 @@ impl RuntimeMemoryCreator for DefaultMemoryCreator { /// Create new MmapMemory fn new_memory( &self, - plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, minimum: usize, maximum: Option, memory_image: Option<&Arc>, ) -> Result> { Ok(Box::new(MmapMemory::new( - plan, + ty, + tunables, minimum, maximum, memory_image, @@ -214,22 +217,29 @@ impl MmapMemory { /// Create a new linear memory instance with specified minimum and maximum /// number of wasm pages. pub fn new( - plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, minimum: usize, mut maximum: Option, memory_image: Option<&Arc>, ) -> Result { + let (style, offset_guard_size) = MemoryStyle::for_memory(*ty, tunables); + // It's a programmer error for these two configuration values to exceed // the host available address space, so panic if such a configuration is // found (mostly an issue for hypothetical 32-bit hosts). - let offset_guard_bytes = usize::try_from(plan.offset_guard_size).unwrap(); - let pre_guard_bytes = usize::try_from(plan.pre_guard_size).unwrap(); + let offset_guard_bytes = usize::try_from(offset_guard_size).unwrap(); + let pre_guard_bytes = if tunables.guard_before_linear_memory { + offset_guard_bytes + } else { + 0 + }; // Ensure that our guard regions are multiples of the host page size. let offset_guard_bytes = round_usize_up_to_host_pages(offset_guard_bytes)?; let pre_guard_bytes = round_usize_up_to_host_pages(pre_guard_bytes)?; - let (alloc_bytes, extra_to_reserve_on_growth) = match plan.style { + let (alloc_bytes, extra_to_reserve_on_growth) = match style { // Dynamic memories start with the minimum size plus the `reserve` // amount specified to grow into. MemoryStyle::Dynamic { reserve } => ( @@ -243,7 +253,7 @@ impl MmapMemory { // of the two is, the `maximum` given or the `bound` specified for // this memory. 
MemoryStyle::Static { byte_reservation } => { - assert!(byte_reservation >= plan.memory.minimum_byte_size().unwrap()); + assert!(byte_reservation >= ty.minimum_byte_size().unwrap()); let bound_bytes = usize::try_from(byte_reservation).unwrap(); let bound_bytes = round_usize_up_to_host_pages(bound_bytes)?; maximum = Some(bound_bytes.min(maximum.unwrap_or(usize::MAX))); @@ -276,7 +286,7 @@ impl MmapMemory { minimum, alloc_bytes + extra_to_reserve_on_growth, ); - slot.instantiate(minimum, Some(image), &plan)?; + slot.instantiate(minimum, Some(image), ty, tunables)?; // On drop, we will unmap our mmap'd range that this slot was // mapped on top of, so there is no need for the slot to wipe // it with an anonymous mapping first. @@ -290,7 +300,7 @@ impl MmapMemory { mmap, len: minimum, maximum, - page_size_log2: plan.memory.page_size_log2, + page_size_log2: ty.page_size_log2, pre_guard_size: pre_guard_bytes, offset_guard_size: offset_guard_bytes, extra_to_reserve_on_growth, @@ -537,15 +547,16 @@ pub struct Memory(pub(crate) Box); impl Memory { /// Create a new dynamic (movable) memory instance for the specified plan. pub fn new_dynamic( - plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, + tunables: &Tunables, creator: &dyn RuntimeMemoryCreator, store: &mut dyn VMStore, memory_image: Option<&Arc>, ) -> Result { - let (minimum, maximum) = Self::limit_new(plan, Some(store))?; - let allocation = creator.new_memory(plan, minimum, maximum, memory_image)?; - let allocation = if plan.memory.shared { - Box::new(SharedMemory::wrap(plan, allocation, plan.memory)?) + let (minimum, maximum) = Self::limit_new(ty, Some(store))?; + let allocation = creator.new_memory(ty, tunables, minimum, maximum, memory_image)?; + let allocation = if ty.shared { + Box::new(SharedMemory::wrap(ty, tunables, allocation)?) } else { allocation }; @@ -554,25 +565,25 @@ impl Memory { /// Create a new static (immovable) memory instance for the specified plan. pub fn new_static( - plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, base_ptr: *mut u8, base_capacity: usize, memory_image: MemoryImageSlot, memory_and_guard_size: usize, store: &mut dyn VMStore, ) -> Result { - let (minimum, maximum) = Self::limit_new(plan, Some(store))?; + let (minimum, maximum) = Self::limit_new(ty, Some(store))?; let pooled_memory = StaticMemory::new( base_ptr, base_capacity, minimum, maximum, - plan.memory.page_size_log2, + ty.page_size_log2, memory_image, memory_and_guard_size, )?; let allocation = Box::new(pooled_memory); - let allocation: Box = if plan.memory.shared { + let allocation: Box = if ty.shared { // FIXME: since the pooling allocator owns the memory allocation // (which is torn down with the instance), the current shared memory // implementation will cause problems; see @@ -589,10 +600,10 @@ impl Memory { /// Returns a tuple of the minimum size, optional maximum size, and log(page /// size) of the memory, all in bytes. pub(crate) fn limit_new( - plan: &MemoryPlan, + ty: &wasmtime_environ::Memory, store: Option<&mut dyn VMStore>, ) -> Result<(usize, Option)> { - let page_size = usize::try_from(plan.memory.page_size()).unwrap(); + let page_size = usize::try_from(ty.page_size()).unwrap(); // This is the absolute possible maximum that the module can try to // allocate, which is our entire address space minus a wasm page. That @@ -608,10 +619,10 @@ impl Memory { let absolute_max = 0usize.wrapping_sub(page_size); // Sanity-check what should already be true from wasm module validation. 
-        if let Ok(size) = plan.memory.minimum_byte_size() {
+        if let Ok(size) = ty.minimum_byte_size() {
             assert!(size <= u64::try_from(absolute_max).unwrap());
         }
-        if let Ok(max) = plan.memory.maximum_byte_size() {
+        if let Ok(max) = ty.maximum_byte_size() {
             assert!(max <= u64::try_from(absolute_max).unwrap());
         }
@@ -619,8 +630,7 @@ impl Memory {
         // space, then we can't satisfy this request, but defer the error to
         // later so the `store` can be informed that an effective oom is
         // happening.
-        let minimum = plan
-            .memory
+        let minimum = ty
             .minimum_byte_size()
             .ok()
             .and_then(|m| usize::try_from(m).ok());
@@ -631,8 +641,7 @@ impl Memory {
         // maximum size exceeds `usize` or `u64` then there's no need to further
         // keep track of it as some sort of runtime limit will kick in long
         // before we reach the statically declared maximum size.
-        let maximum = plan
-            .memory
+        let maximum = ty
             .maximum_byte_size()
             .ok()
             .and_then(|m| usize::try_from(m).ok());
@@ -647,11 +656,11 @@ impl Memory {
         // We ignore the store limits for shared memories since they are
         // technically not created within a store (though, trickily, they
         // may be associated with one in order to get a `vmctx`).
-        if !plan.memory.shared {
+        if !ty.shared {
             if !store.memory_growing(0, minimum.unwrap_or(absolute_max), maximum)? {
                 bail!(
                     "memory minimum size of {} pages exceeds memory limits",
-                    plan.memory.limits.min
+                    ty.limits.min
                 );
             }
         }
@@ -662,7 +671,7 @@ impl Memory {
         let minimum = minimum.ok_or_else(|| {
             format_err!(
                 "memory minimum size of {} pages exceeds memory limits",
-                plan.memory.limits.min
+                ty.limits.min
             )
         })?;
diff --git a/crates/wasmtime/src/runtime/vm/threads/shared_memory.rs b/crates/wasmtime/src/runtime/vm/threads/shared_memory.rs
index 155ab77c97c0..263c086cfd05 100644
--- a/crates/wasmtime/src/runtime/vm/threads/shared_memory.rs
+++ b/crates/wasmtime/src/runtime/vm/threads/shared_memory.rs
@@ -8,7 +8,7 @@ use std::ops::Range;
 use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
 use std::sync::{Arc, RwLock};
 use std::time::{Duration, Instant};
-use wasmtime_environ::{MemoryPlan, MemoryStyle, Trap};
+use wasmtime_environ::{MemoryStyle, Trap, Tunables};

 /// For shared memory (and only for shared memory), this lock-version restricts
 /// access when growing the memory or checking its size. This is to conform with
@@ -30,22 +30,23 @@ struct SharedMemoryInner {

 impl SharedMemory {
     /// Construct a new [`SharedMemory`].
-    pub fn new(plan: MemoryPlan) -> Result<Self> {
-        let (minimum_bytes, maximum_bytes) = Memory::limit_new(&plan, None)?;
-        let mmap_memory = MmapMemory::new(&plan, minimum_bytes, maximum_bytes, None)?;
-        Self::wrap(&plan, Box::new(mmap_memory), plan.memory)
+    pub fn new(ty: &wasmtime_environ::Memory, tunables: &Tunables) -> Result<Self> {
+        let (minimum_bytes, maximum_bytes) = Memory::limit_new(ty, None)?;
+        let mmap_memory = MmapMemory::new(ty, tunables, minimum_bytes, maximum_bytes, None)?;
+        Self::wrap(ty, tunables, Box::new(mmap_memory))
     }

     /// Wrap an existing [Memory] with the locking provided by a [SharedMemory].
     pub fn wrap(
-        plan: &MemoryPlan,
+        ty: &wasmtime_environ::Memory,
+        tunables: &Tunables,
         mut memory: Box<dyn RuntimeLinearMemory>,
-        ty: wasmtime_environ::Memory,
     ) -> Result<Self> {
         if !ty.shared {
             bail!("shared memory must have a `shared` memory type");
         }
-        if !matches!(plan.style, MemoryStyle::Static { .. }) {
+        let (style, _) = MemoryStyle::for_memory(*ty, tunables);
+        if !matches!(style, MemoryStyle::Static { .. }) {
             bail!("shared memory can only be built from a static memory allocation")
         }
         assert!(
@@ -53,7 +54,7 @@ impl SharedMemory {
             "cannot re-wrap a shared memory"
         );
         Ok(Self(Arc::new(SharedMemoryInner {
-            ty,
+            ty: *ty,
             spot: ParkingSpot::default(),
             def: LongTermVMMemoryDefinition(memory.vmmemory()),
             memory: RwLock::new(memory),
diff --git a/winch/codegen/src/codegen/env.rs b/winch/codegen/src/codegen/env.rs
index 120ee75e8ad9..fc8e0bb19c24 100644
--- a/winch/codegen/src/codegen/env.rs
+++ b/winch/codegen/src/codegen/env.rs
@@ -11,9 +11,9 @@ use std::collections::{
 use std::mem;
 use wasmparser::BlockType;
 use wasmtime_environ::{
-    BuiltinFunctionIndex, FuncIndex, GlobalIndex, MemoryIndex, MemoryPlan, MemoryStyle,
-    ModuleTranslation, ModuleTypesBuilder, PrimaryMap, PtrSize, Table, TableIndex, TypeConvert,
-    TypeIndex, VMOffsets, WasmHeapType, WasmValType,
+    BuiltinFunctionIndex, FuncIndex, GlobalIndex, Memory, MemoryIndex, MemoryStyle,
+    ModuleTranslation, ModuleTypesBuilder, PrimaryMap, PtrSize, Table, TableIndex, Tunables,
+    TypeConvert, TypeIndex, VMOffsets, WasmHeapType, WasmValType,
 };

 #[derive(Debug, Clone, Copy)]
@@ -116,6 +116,8 @@ pub struct FuncEnv<'a, 'translation: 'a, 'data: 'translation, P: PtrSize> {
     pub types: &'translation ModuleTypesBuilder,
     /// The built-in functions available to the JIT code.
     pub builtins: &'translation mut BuiltinFunctions,
+    /// Configurable code generation options.
+    tunables: &'translation Tunables,
     /// Track resolved table information.
     resolved_tables: HashMap<TableIndex, TableData>,
     /// Track resolved heap information.
     resolved_heaps: HashMap<MemoryIndex, HeapData>,
@@ -151,6 +153,7 @@ impl<'a, 'translation, 'data, P: PtrSize> FuncEnv<'a, 'translation, 'data, P> {
         translation: &'translation ModuleTranslation<'data>,
         types: &'translation ModuleTypesBuilder,
         builtins: &'translation mut BuiltinFunctions,
+        tunables: &'translation Tunables,
         isa: &dyn TargetIsa,
         ptr_type: WasmValType,
     ) -> Self {
@@ -158,6 +161,7 @@ impl<'a, 'translation, 'data, P: PtrSize> FuncEnv<'a, 'translation, 'data, P> {
             vmoffsets,
             translation,
             types,
+            tunables,
             resolved_tables: HashMap::new(),
             resolved_heaps: HashMap::new(),
             resolved_callees: HashMap::new(),
@@ -289,22 +293,23 @@ impl<'a, 'translation, 'data, P: PtrSize> FuncEnv<'a, 'translation, 'data, P> {
             ),
         };

-        let plan = &self.translation.module.memory_plans[index];
-        let (min_size, max_size) = heap_limits(&plan);
-        let (style, offset_guard_size) = heap_style_and_offset_guard_size(&plan);
+        let memory = &self.translation.module.memories[index];
+        let (min_size, max_size) = heap_limits(memory);
+        let (style, offset_guard_size) =
+            heap_style_and_offset_guard_size(memory, self.tunables);

         *entry.insert(HeapData {
             offset: base_offset,
             import_from,
             current_length_offset,
             style,
-            ty: match plan.memory.idx_type {
+            ty: match memory.idx_type {
                 wasmtime_environ::IndexType::I32 => WasmValType::I32,
                 wasmtime_environ::IndexType::I64 => WasmValType::I64,
             },
             min_size,
             max_size,
-            page_size_log2: plan.memory.page_size_log2,
+            page_size_log2: memory.page_size_log2,
             offset_guard_size,
         })
     }
@@ -420,34 +425,27 @@ impl<'a, 'data> TypeConverter<'a, 'data> {
     }
 }

-fn heap_style_and_offset_guard_size(plan: &MemoryPlan) -> (HeapStyle, u64) {
-    match plan {
-        MemoryPlan {
-            style: MemoryStyle::Static { byte_reservation },
-            offset_guard_size,
-            ..
-        } => (
+fn heap_style_and_offset_guard_size(memory: &Memory, tunables: &Tunables) -> (HeapStyle, u64) {
+    let (style, offset_guard_size) = MemoryStyle::for_memory(*memory, tunables);
+    match style {
+        MemoryStyle::Static { byte_reservation } => (
             HeapStyle::Static {
-                bound: *byte_reservation,
+                bound: byte_reservation,
             },
-            *offset_guard_size,
+            offset_guard_size,
         ),
-        MemoryPlan {
-            style: MemoryStyle::Dynamic { .. },
-            offset_guard_size,
-            ..
-        } => (HeapStyle::Dynamic, *offset_guard_size),
+        MemoryStyle::Dynamic { .. } => (HeapStyle::Dynamic, offset_guard_size),
     }
 }

-fn heap_limits(plan: &MemoryPlan) -> (u64, Option<u64>) {
+fn heap_limits(memory: &Memory) -> (u64, Option<u64>) {
     (
-        plan.memory.minimum_byte_size().unwrap_or_else(|_| {
+        memory.minimum_byte_size().unwrap_or_else(|_| {
             // 2^64 as a minimum doesn't fit in a 64 bit integer.
             // So in this case, the minimum is clamped to u64::MAX.
             u64::MAX
         }),
-        plan.memory.maximum_byte_size().ok(),
+        memory.maximum_byte_size().ok(),
     )
 }
diff --git a/winch/codegen/src/isa/aarch64/mod.rs b/winch/codegen/src/isa/aarch64/mod.rs
index 6cd04f68feb6..5300dedb14be 100644
--- a/winch/codegen/src/isa/aarch64/mod.rs
+++ b/winch/codegen/src/isa/aarch64/mod.rs
@@ -104,6 +104,7 @@ impl TargetIsa for Aarch64 {
             translation,
             types,
             builtins,
+            tunables,
             self,
             abi::Aarch64ABI::ptr_type(),
         );
diff --git a/winch/codegen/src/isa/x64/mod.rs b/winch/codegen/src/isa/x64/mod.rs
index 7bef612ca0d4..455130fdbde1 100644
--- a/winch/codegen/src/isa/x64/mod.rs
+++ b/winch/codegen/src/isa/x64/mod.rs
@@ -114,6 +114,7 @@ impl TargetIsa for X64 {
             translation,
             types,
             builtins,
+            tunables,
             self,
             abi::X64ABI::ptr_type(),
         );
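
A note for reviewers on the new calling convention: everywhere this patch deletes a
`MemoryPlan`, the replacement pattern is to pass the raw `wasmtime_environ::Memory`
type alongside `&Tunables` and recompute the style and guard configuration at the
point of use via `MemoryStyle::for_memory`. The sketch below illustrates that
pattern in one place; `describe_allocation` is a hypothetical helper written only
for illustration and does not appear in this patch, though the types and calls it
uses (`Memory`, `Tunables`, `MemoryStyle::for_memory`, `guard_before_linear_memory`)
are the ones relied on above.

    use wasmtime_environ::{Memory, MemoryStyle, Tunables};

    // Hypothetical helper: derive allocation parameters the way call sites in
    // this patch now do, directly from a memory type plus tunables, with no
    // intermediate `MemoryPlan`. Returns (bytes to reserve, total guard bytes).
    fn describe_allocation(ty: &Memory, tunables: &Tunables) -> (u64, u64) {
        // `for_memory` yields the chosen style plus the offset-guard size.
        let (style, offset_guard_size) = MemoryStyle::for_memory(*ty, tunables);
        let reservation = match style {
            // Static memories reserve the full addressable range up front.
            MemoryStyle::Static { byte_reservation } => byte_reservation,
            // Dynamic memories reserve extra space to grow into.
            MemoryStyle::Dynamic { reserve } => reserve,
        };
        // The pre-guard is likewise derived from configuration on demand,
        // mirroring what `MmapMemory::new` does above.
        let pre_guard_size = if tunables.guard_before_linear_memory {
            offset_guard_size
        } else {
            0
        };
        (reservation, offset_guard_size + pre_guard_size)
    }

The upshot is that `Tunables` stays the single source of truth for these values,
and there is no second, precomputed structure that has to be kept in sync with it.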