diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs index 8821dfc287d3..b5c4750d6644 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -1,6 +1,6 @@ use crate::translate::{ - FuncEnvironment as _, FuncTranslationState, GlobalVariable, Heap, HeapData, HeapStyle, - StructFieldsVec, TableData, TableSize, TargetEnvironment, + FuncEnvironment as _, FuncTranslationState, GlobalVariable, Heap, HeapData, StructFieldsVec, + TableData, TableSize, TargetEnvironment, }; use crate::{gc, BuiltinFunctionSignatures, TRAP_INTERNAL_ASSERT}; use cranelift_codegen::cursor::FuncCursor; @@ -20,10 +20,10 @@ use std::mem; use wasmparser::{Operator, WasmFeatures}; use wasmtime_environ::{ BuiltinFunctionIndex, DataIndex, ElemIndex, EngineOrModuleTypeIndex, FuncIndex, GlobalIndex, - IndexType, Memory, MemoryIndex, MemoryPlan, MemoryStyle, Module, ModuleInternedTypeIndex, - ModuleTranslation, ModuleTypesBuilder, PtrSize, Table, TableIndex, TableStyle, Tunables, - TypeConvert, TypeIndex, VMOffsets, WasmCompositeType, WasmFuncType, WasmHeapTopType, - WasmHeapType, WasmRefType, WasmResult, WasmValType, + IndexType, Memory, MemoryIndex, Module, ModuleInternedTypeIndex, ModuleTranslation, + ModuleTypesBuilder, PtrSize, Table, TableIndex, Tunables, TypeConvert, TypeIndex, VMOffsets, + WasmCompositeType, WasmFuncType, WasmHeapTopType, WasmHeapType, WasmRefType, WasmResult, + WasmValType, }; use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK}; @@ -696,12 +696,12 @@ impl<'module_environment> FuncEnvironment<'module_environment> { /// Get the Memory for the given index. fn memory(&self, index: MemoryIndex) -> Memory { - self.module.memory_plans[index].memory + self.module.memories[index] } /// Get the Table for the given index. fn table(&self, index: TableIndex) -> Table { - self.module.table_plans[index].table + self.module.tables[index] } /// Cast the value to I64 and sign extend if necessary. @@ -817,7 +817,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } }; - let table = &self.module.table_plans[index].table; + let table = &self.module.tables[index]; let element_size = if table.ref_type.is_vmgcref_type() { // For GC-managed references, tables store `Option`s. ir::types::I32.bytes() @@ -869,7 +869,6 @@ impl<'module_environment> FuncEnvironment<'module_environment> { table_index: TableIndex, index: ir::Value, cold_blocks: bool, - lazy_init: bool, ) -> ir::Value { let pointer_type = self.pointer_type(); self.ensure_table_exists(builder.func, table_index); @@ -882,7 +881,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> { let (table_entry_addr, flags) = table_data.prepare_table_addr(self, builder, index); let value = builder.ins().load(pointer_type, flags, table_entry_addr, 0); - if !lazy_init { + if !self.tunables.table_lazy_init { return value; } @@ -1325,14 +1324,11 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { cold_blocks: bool, ) -> WasmResult> { // Get the funcref pointer from the table. - let table = &self.env.module.table_plans[table_index]; - let TableStyle::CallerChecksSignature { lazy_init } = table.style; let funcref_ptr = self.env.get_or_init_func_ref_table_elem( self.builder, table_index, callee, cold_blocks, - lazy_init, ); // If necessary, check the signature. 
@@ -1376,18 +1372,14 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { ty_index: TypeIndex, funcref_ptr: ir::Value, ) -> CheckIndirectCallTypeSignature { - let table = &self.env.module.table_plans[table_index]; + let table = &self.env.module.tables[table_index]; let sig_id_size = self.env.offsets.size_of_vmshared_type_index(); let sig_id_type = Type::int(u16::from(sig_id_size) * 8).unwrap(); - // Generate a rustc compile error here if more styles are added in - // the future as the following code is tailored to just this style. - let TableStyle::CallerChecksSignature { .. } = table.style; - // Test if a type check is necessary for this table. If this table is a // table of typed functions and that type matches `ty_index`, then // there's no need to perform a typecheck. - match table.table.ref_type.heap_type { + match table.ref_type.heap_type { // Functions do not have a statically known type in the table, a // typecheck is required. Fall through to below to perform the // actual typecheck. @@ -1403,7 +1395,7 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { let specified_ty = self.env.module.types[ty_index]; if specified_ty == table_ty { return CheckIndirectCallTypeSignature::StaticMatch { - may_be_null: table.table.ref_type.nullable, + may_be_null: table.ref_type.nullable, }; } @@ -1421,7 +1413,7 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { // a null pointer, then this was a call to null. Otherwise // if it succeeds then we know it won't match, so trap // anyway. - if table.table.ref_type.nullable { + if table.ref_type.nullable { if self.env.signals_based_traps() { let mem_flags = ir::MemFlags::trusted().with_readonly(); self.builder.ins().load( @@ -1446,7 +1438,7 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { // Tables of `nofunc` can only be inhabited by null, so go ahead and // trap with that. WasmHeapType::NoFunc => { - assert!(table.table.ref_type.nullable); + assert!(table.ref_type.nullable); self.env .trap(self.builder, crate::TRAP_INDIRECT_CALL_TO_NULL); return CheckIndirectCallTypeSignature::StaticTrap; @@ -1782,8 +1774,7 @@ impl<'module_environment> crate::translate::FuncEnvironment table_index: TableIndex, index: ir::Value, ) -> WasmResult { - let plan = &self.module.table_plans[table_index]; - let table = plan.table; + let table = &self.module.tables[table_index]; self.ensure_table_exists(builder.func, table_index); let table_data = self.tables[table_index].clone().unwrap(); let heap_ty = table.ref_type.heap_type; @@ -1801,16 +1792,9 @@ impl<'module_environment> crate::translate::FuncEnvironment } // Function types. - WasmHeapTopType::Func => match plan.style { - TableStyle::CallerChecksSignature { lazy_init } => Ok(self - .get_or_init_func_ref_table_elem( - builder, - table_index, - index, - false, - lazy_init, - )), - }, + WasmHeapTopType::Func => { + Ok(self.get_or_init_func_ref_table_elem(builder, table_index, index, false)) + } } } @@ -1821,8 +1805,7 @@ impl<'module_environment> crate::translate::FuncEnvironment value: ir::Value, index: ir::Value, ) -> WasmResult<()> { - let plan = &self.module.table_plans[table_index]; - let table = plan.table; + let table = &self.module.tables[table_index]; self.ensure_table_exists(builder.func, table_index); let table_data = self.tables[table_index].clone().unwrap(); let heap_ty = table.ref_type.heap_type; @@ -1842,26 +1825,21 @@ impl<'module_environment> crate::translate::FuncEnvironment // Function types. 
WasmHeapTopType::Func => { - match plan.style { - TableStyle::CallerChecksSignature { lazy_init } => { - let (elem_addr, flags) = - table_data.prepare_table_addr(self, builder, index); - // Set the "initialized bit". See doc-comment on - // `FUNCREF_INIT_BIT` in - // crates/environ/src/ref_bits.rs for details. - let value_with_init_bit = if lazy_init { - builder - .ins() - .bor_imm(value, Imm64::from(FUNCREF_INIT_BIT as i64)) - } else { - value - }; - builder - .ins() - .store(flags, value_with_init_bit, elem_addr, 0); - Ok(()) - } - } + let (elem_addr, flags) = table_data.prepare_table_addr(self, builder, index); + // Set the "initialized bit". See doc-comment on + // `FUNCREF_INIT_BIT` in + // crates/environ/src/ref_bits.rs for details. + let value_with_init_bit = if self.tunables.table_lazy_init { + builder + .ins() + .bor_imm(value, Imm64::from(FUNCREF_INIT_BIT as i64)) + } else { + value + }; + builder + .ins() + .store(flags, value_with_init_bit, elem_addr, 0); + Ok(()) } } } @@ -2323,25 +2301,21 @@ impl<'module_environment> crate::translate::FuncEnvironment fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult { let pointer_type = self.pointer_type(); - let is_shared = self.module.memory_plans[index].memory.shared; + let is_shared = self.module.memories[index].shared; - let min_size = self.module.memory_plans[index] - .memory + let min_size = self.module.memories[index] .minimum_byte_size() .unwrap_or_else(|_| { // The only valid Wasm memory size that won't fit in a 64-bit // integer is the maximum memory64 size (2^64) which is one // larger than `u64::MAX` (2^64 - 1). In this case, just say the // minimum heap size is `u64::MAX`. - debug_assert_eq!(self.module.memory_plans[index].memory.limits.min, 1 << 48); - debug_assert_eq!(self.module.memory_plans[index].memory.page_size(), 1 << 16); + debug_assert_eq!(self.module.memories[index].limits.min, 1 << 48); + debug_assert_eq!(self.module.memories[index].page_size(), 1 << 16); u64::MAX }); - let max_size = self.module.memory_plans[index] - .memory - .maximum_byte_size() - .ok(); + let max_size = self.module.memories[index].maximum_byte_size().ok(); let (ptr, base_offset, current_length_offset, ptr_memtype) = { let vmctx = self.vmctx(func); @@ -2395,155 +2369,163 @@ impl<'module_environment> crate::translate::FuncEnvironment } }; - let page_size_log2 = self.module.memory_plans[index].memory.page_size_log2; - - // If we have a declared maximum, we can make this a "static" heap, which is - // allocated up front and never moved. - let (offset_guard_size, heap_style, readonly_base, base_fact, memory_type) = - match self.module.memory_plans[index] { - MemoryPlan { - style: MemoryStyle::Dynamic { .. }, - offset_guard_size, - pre_guard_size: _, - memory: _, - } => { - let heap_bound = func.create_global_value(ir::GlobalValueData::Load { - base: ptr, - offset: Offset32::new(current_length_offset), - global_type: pointer_type, - flags: MemFlags::trusted(), - }); - - let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype { - // Create a memtype representing the untyped memory region. - let data_mt = func.create_memory_type(ir::MemoryTypeData::DynamicMemory { - gv: heap_bound, - size: offset_guard_size, - }); - // This fact applies to any pointer to the start of the memory. - let base_fact = ir::Fact::dynamic_base_ptr(data_mt); - // This fact applies to the length. 
- let length_fact = ir::Fact::global_value( - u16::try_from(self.isa.pointer_type().bits()).unwrap(), - heap_bound, - ); - // Create a field in the vmctx for the base pointer. - match &mut func.memory_types[ptr_memtype] { - ir::MemoryTypeData::Struct { size, fields } => { - let base_offset = u64::try_from(base_offset).unwrap(); - fields.push(ir::MemoryTypeField { - offset: base_offset, - ty: self.isa.pointer_type(), - // Read-only field from the PoV of PCC checks: - // don't allow stores to this field. (Even if - // it is a dynamic memory whose base can - // change, that update happens inside the - // runtime, not in generated code.) - readonly: true, - fact: Some(base_fact.clone()), - }); - let current_length_offset = - u64::try_from(current_length_offset).unwrap(); - fields.push(ir::MemoryTypeField { - offset: current_length_offset, - ty: self.isa.pointer_type(), - // As above, read-only; only the runtime modifies it. - readonly: true, - fact: Some(length_fact), - }); - - let pointer_size = u64::from(self.isa.pointer_type().bytes()); - let fields_end = std::cmp::max( - base_offset + pointer_size, - current_length_offset + pointer_size, - ); - *size = std::cmp::max(*size, fields_end); - } - _ => { - panic!("Bad memtype"); - } - } - // Apply a fact to the base pointer. - (Some(base_fact), Some(data_mt)) - } else { - (None, None) - }; - - ( - offset_guard_size, - HeapStyle::Dynamic { - bound_gv: heap_bound, - }, - false, - base_fact, - data_mt, - ) - } - MemoryPlan { - style: - MemoryStyle::Static { - byte_reservation: bound_bytes, - }, - offset_guard_size, - pre_guard_size: _, - memory: _, - } => { - let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype { - // Create a memtype representing the untyped memory region. - let data_mt = func.create_memory_type(ir::MemoryTypeData::Memory { - size: bound_bytes - .checked_add(offset_guard_size) - .expect("Memory plan has overflowing size plus guard"), - }); - // This fact applies to any pointer to the start of the memory. - let base_fact = Fact::Mem { - ty: data_mt, - min_offset: 0, - max_offset: 0, - nullable: false, - }; - // Create a field in the vmctx for the base pointer. - match &mut func.memory_types[ptr_memtype] { - ir::MemoryTypeData::Struct { size, fields } => { - let offset = u64::try_from(base_offset).unwrap(); - fields.push(ir::MemoryTypeField { - offset, - ty: self.isa.pointer_type(), - // Read-only field from the PoV of PCC checks: - // don't allow stores to this field. (Even if - // it is a dynamic memory whose base can - // change, that update happens inside the - // runtime, not in generated code.) - readonly: true, - fact: Some(base_fact.clone()), - }); - *size = std::cmp::max( - *size, - offset + u64::from(self.isa.pointer_type().bytes()), - ); - } - _ => { - panic!("Bad memtype"); - } - } - // Apply a fact to the base pointer. - (Some(base_fact), Some(data_mt)) - } else { - (None, None) - }; - ( - offset_guard_size, - HeapStyle::Static { bound: bound_bytes }, - true, - base_fact, - data_mt, - ) - } - }; - + let page_size_log2 = self.module.memories[index].page_size_log2; + + // // If we have a declared maximum, we can make this a "static" heap, which is + // // alloced up front and never moved. + // let (offset_guard_size, heap_style, readonly_base, base_fact, memory_type) = + // match self.module.memory_plans[index] { + // MemoryPlan { + // style: MemoryStyle::Dynamic { .. 
}, + // offset_guard_size, + // pre_guard_size: _, + // memory: _, + // } => { + // let heap_bound = func.create_global_value(ir::GlobalValueData::Load { + // base: ptr, + // offset: Offset32::new(current_length_offset), + // global_type: pointer_type, + // flags: MemFlags::trusted(), + // }); + + // let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype { + // // Create a memtype representing the untyped memory region. + // let data_mt = func.create_memory_type(ir::MemoryTypeData::DynamicMemory { + // gv: heap_bound, + // size: offset_guard_size, + // }); + // // This fact applies to any pointer to the start of the memory. + // let base_fact = ir::Fact::dynamic_base_ptr(data_mt); + // // This fact applies to the length. + // let length_fact = ir::Fact::global_value( + // u16::try_from(self.isa.pointer_type().bits()).unwrap(), + // heap_bound, + // ); + // // Create a field in the vmctx for the base pointer. + // match &mut func.memory_types[ptr_memtype] { + // ir::MemoryTypeData::Struct { size, fields } => { + // let base_offset = u64::try_from(base_offset).unwrap(); + // fields.push(ir::MemoryTypeField { + // offset: base_offset, + // ty: self.isa.pointer_type(), + // // Read-only field from the PoV of PCC checks: + // // don't allow stores to this field. (Even if + // // it is a dynamic memory whose base can + // // change, that update happens inside the + // // runtime, not in generated code.) + // readonly: true, + // fact: Some(base_fact.clone()), + // }); + // let current_length_offset = + // u64::try_from(current_length_offset).unwrap(); + // fields.push(ir::MemoryTypeField { + // offset: current_length_offset, + // ty: self.isa.pointer_type(), + // // As above, read-only; only the runtime modifies it. + // readonly: true, + // fact: Some(length_fact), + // }); + + // let pointer_size = u64::from(self.isa.pointer_type().bytes()); + // let fields_end = std::cmp::max( + // base_offset + pointer_size, + // current_length_offset + pointer_size, + // ); + // *size = std::cmp::max(*size, fields_end); + // } + // _ => { + // panic!("Bad memtype"); + // } + // } + // // Apply a fact to the base pointer. + // (Some(base_fact), Some(data_mt)) + // } else { + // (None, None) + // }; + + // ( + // offset_guard_size, + // HeapStyle::Dynamic { + // bound_gv: heap_bound, + // }, + // false, + // base_fact, + // data_mt, + // ) + // } + // MemoryPlan { + // style: + // MemoryStyle::Static { + // byte_reservation: bound_bytes, + // }, + // offset_guard_size, + // pre_guard_size: _, + // memory: _, + // } => { + // let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype { + // // Create a memtype representing the untyped memory region. + // let data_mt = func.create_memory_type(ir::MemoryTypeData::Memory { + // size: bound_bytes + // .checked_add(offset_guard_size) + // .expect("Memory plan has overflowing size plus guard"), + // }); + // // This fact applies to any pointer to the start of the memory. + // let base_fact = Fact::Mem { + // ty: data_mt, + // min_offset: 0, + // max_offset: 0, + // nullable: false, + // }; + // // Create a field in the vmctx for the base pointer. + // match &mut func.memory_types[ptr_memtype] { + // ir::MemoryTypeData::Struct { size, fields } => { + // let offset = u64::try_from(base_offset).unwrap(); + // fields.push(ir::MemoryTypeField { + // offset, + // ty: self.isa.pointer_type(), + // // Read-only field from the PoV of PCC checks: + // // don't allow stores to this field. 
(Even if + // // it is a dynamic memory whose base can + // // change, that update happens inside the + // // runtime, not in generated code.) + // readonly: true, + // fact: Some(base_fact.clone()), + // }); + // *size = std::cmp::max( + // *size, + // offset + u64::from(self.isa.pointer_type().bytes()), + // ); + // } + // _ => { + // panic!("Bad memtype"); + // } + // } + // // Apply a fact to the base pointer. + // (Some(base_fact), Some(data_mt)) + // } else { + // (None, None) + // }; + // ( + // offset_guard_size, + // HeapStyle::Static { bound: bound_bytes }, + // true, + // base_fact, + // data_mt, + // ) + // } + // }; + let _ = ptr_memtype; + + // If the maximum byte size of this memory is less than or equal to the + // configured memory reservation for each memory then that means that + // the base pointer won't ever change at runtime. In this situation the + // load of the base pointer can be readonly and, for example, hoisted + // out of loops. let mut flags = MemFlags::trusted().with_checked(); - if readonly_base { - flags.set_readonly(); + if let Some(max) = max_size { + if max <= self.tunables.memory_reservation { + flags.set_readonly(); + } } let heap_base = func.create_global_value(ir::GlobalValueData::Load { base: ptr, @@ -2551,16 +2533,25 @@ impl<'module_environment> crate::translate::FuncEnvironment global_type: pointer_type, flags, }); - func.global_value_facts[heap_base] = base_fact; + // TODO + // func.global_value_facts[heap_base] = base_fact; + let bound_gv = func.create_global_value(ir::GlobalValueData::Load { + base: ptr, + offset: Offset32::new(current_length_offset), + global_type: pointer_type, + flags: MemFlags::trusted(), + }); Ok(self.heaps.push(HeapData { base: heap_base, min_size, max_size, - offset_guard_size, - style: heap_style, + guard_size: self.tunables.guard_size, + attempt_bounds_check_elision: self.tunables.attempt_bounds_check_elision, + memory_reservation: self.tunables.memory_reservation, + bound_gv, index_type: index_type_to_ir_type(self.memory(index).idx_type), - memory_type, + memory_type: None, page_size_log2, })) } @@ -2756,7 +2747,7 @@ impl<'module_environment> crate::translate::FuncEnvironment ) -> WasmResult { let pointer_type = self.pointer_type(); let vmctx = self.vmctx(&mut pos.func); - let is_shared = self.module.memory_plans[index].memory.shared; + let is_shared = self.module.memories[index].shared; let base = pos.ins().global_value(pointer_type, vmctx); let current_length_in_bytes = match self.module.defined_memory_index(index) { Some(def_index) => { @@ -2818,7 +2809,7 @@ impl<'module_environment> crate::translate::FuncEnvironment } }; - let page_size_log2 = i64::from(self.module.memory_plans[index].memory.page_size_log2); + let page_size_log2 = i64::from(self.module.memories[index].page_size_log2); let current_length_in_pages = pos.ins().ushr_imm(current_length_in_bytes, page_size_log2); let single_byte_pages = match page_size_log2 { 16 => false, diff --git a/crates/cranelift/src/translate/code_translator/bounds_checks.rs b/crates/cranelift/src/translate/code_translator/bounds_checks.rs index b49bf1ce2083..63ce6bc65b11 100644 --- a/crates/cranelift/src/translate/code_translator/bounds_checks.rs +++ b/crates/cranelift/src/translate/code_translator/bounds_checks.rs @@ -63,8 +63,10 @@ where let pcc = env.proof_carrying_code(); let host_page_size_log2 = env.target_config().page_size_align_log2; - let can_use_virtual_memory = - heap.page_size_log2 >= host_page_size_log2 && env.signals_based_traps(); + let can_elide_bounds_check = 
heap.page_size_log2 >= host_page_size_log2 + && env.signals_based_traps() + && heap.attempt_bounds_check_elision; let make_compare = |builder: &mut FunctionBuilder, compare_kind: IntCC, @@ -193,7 +195,7 @@ where // multiple fields in the same struct that is in linear memory -- // will all emit the same `index > bound` check, which we can GVN. HeapStyle::Dynamic { bound_gv } - if can_use_virtual_memory && offset_and_size <= heap.offset_guard_size => + if can_use_virtual_memory && offset_and_size <= heap.guard_size => { let bound = get_dynamic_heap_bound(builder, env, heap); let oob = make_compare( @@ -371,7 +373,7 @@ where if can_use_virtual_memory && heap.index_type == ir::types::I32 && u64::from(u32::MAX) - <= u64::from(bound) + u64::from(heap.offset_guard_size) - offset_and_size => + <= u64::from(bound) + u64::from(heap.guard_size) - offset_and_size => { assert!( can_use_virtual_memory, @@ -385,7 +387,7 @@ where offset, AddrPcc::static32( heap.memory_type, - u64::from(bound) + u64::from(heap.offset_guard_size), + u64::from(bound) + u64::from(heap.guard_size), ), )) } @@ -604,7 +606,7 @@ fn explicit_check_oob_condition_and_compute_addr( min: Expr::constant(0), max: Expr::offset( &Expr::global_value(gv), - i64::try_from(heap.offset_guard_size) + i64::try_from(heap.guard_size) .unwrap() .checked_sub(i64::from(access_size)) .unwrap(), diff --git a/crates/cranelift/src/translate/heap.rs b/crates/cranelift/src/translate/heap.rs index eff81c1e78f2..8ce6441db066 100644 --- a/crates/cranelift/src/translate/heap.rs +++ b/crates/cranelift/src/translate/heap.rs @@ -72,10 +72,16 @@ pub struct HeapData { pub max_size: Option<u64>, /// Size in bytes of the offset-guard pages following the heap. - pub offset_guard_size: u64, + pub guard_size: u64, - /// Heap style, with additional style-specific info. - pub style: HeapStyle, + /// Whether explicit bounds checks may be elided for this heap by relying + /// on its guard pages (and signal handling) to catch out-of-bounds + /// accesses. + pub attempt_bounds_check_elision: bool, + + /// The size, in bytes, of the virtual memory reservation made for this + /// heap (mirrors `Tunables::memory_reservation`). + pub memory_reservation: u64, + + /// Global value providing the current bound of the heap in bytes. + pub bound_gv: GlobalValue, /// The index type for the heap. pub index_type: Type, @@ -87,21 +93,21 @@ pub struct HeapData { pub page_size_log2: u8, } -/// Style of heap including style-specific information. -#[derive(Clone, PartialEq, Hash)] -pub enum HeapStyle { - /// A dynamic heap can be relocated to a different base address when it is - /// grown. - Dynamic { - /// Global value providing the current bound of the heap in bytes. - bound_gv: GlobalValue, - }, +// /// Style of heap including style-specific information. +// #[derive(Clone, PartialEq, Hash)] +// pub enum HeapStyle { +// /// A dynamic heap can be relocated to a different base address when it is +// /// grown. +// Dynamic { +// /// Global value providing the current bound of the heap in bytes. +// bound_gv: GlobalValue, +// }, - /// A static heap has a fixed base address and a number of not-yet-allocated - /// pages before the offset-guard pages. - Static { - /// Heap bound in bytes. The offset-guard pages are allocated after the - /// bound. - bound: u64, - }, -} +// /// A static heap has a fixed base address and a number of not-yet-allocated +// /// pages before the offset-guard pages. +// Static { +// /// Heap bound in bytes. The offset-guard pages are allocated after the +// /// bound.
+// bound: u64, +// }, +// } diff --git a/crates/cranelift/src/translate/mod.rs b/crates/cranelift/src/translate/mod.rs index 28354827447f..0ef60af091a6 100644 --- a/crates/cranelift/src/translate/mod.rs +++ b/crates/cranelift/src/translate/mod.rs @@ -20,7 +20,7 @@ mod translation_utils; pub use self::environ::{FuncEnvironment, GlobalVariable, StructFieldsVec, TargetEnvironment}; pub use self::func_translator::FuncTranslator; -pub use self::heap::{Heap, HeapData, HeapStyle}; +pub use self::heap::{Heap, HeapData}; pub use self::state::FuncTranslationState; pub use self::table::{TableData, TableSize}; pub use self::translation_utils::*; diff --git a/crates/environ/src/compile/module_environ.rs b/crates/environ/src/compile/module_environ.rs index 26e5e646fa50..c052901800e7 100644 --- a/crates/environ/src/compile/module_environ.rs +++ b/crates/environ/src/compile/module_environ.rs @@ -1,6 +1,6 @@ use crate::module::{ - FuncRefIndex, Initializer, MemoryInitialization, MemoryInitializer, MemoryPlan, Module, - TablePlan, TableSegment, TableSegmentElements, + FuncRefIndex, Initializer, MemoryInitialization, MemoryInitializer, Module, TableSegment, + TableSegmentElements, }; use crate::prelude::*; use crate::{ @@ -343,13 +343,12 @@ impl<'a, 'data> ModuleEnvironment<'a, 'data> { Payload::TableSection(tables) => { self.validator.table_section(&tables)?; let cnt = usize::try_from(tables.count()).unwrap(); - self.result.module.table_plans.reserve_exact(cnt); + self.result.module.tables.reserve_exact(cnt); for entry in tables { let wasmparser::Table { ty, init } = entry?; let table = self.convert_table_type(&ty)?; - let plan = TablePlan::for_table(table, &self.tunables); - self.result.module.table_plans.push(plan); + self.result.module.tables.push(table); let init = match init { wasmparser::TableInit::RefNull => TableInitialValue::Null { precomputed: Vec::new(), @@ -374,12 +373,11 @@ impl<'a, 'data> ModuleEnvironment<'a, 'data> { self.validator.memory_section(&memories)?; let cnt = usize::try_from(memories.count()).unwrap(); - self.result.module.memory_plans.reserve_exact(cnt); + self.result.module.memories.reserve_exact(cnt); for entry in memories { let memory = entry?; - let plan = MemoryPlan::for_memory(memory.into(), &self.tunables); - self.result.module.memory_plans.push(plan); + self.result.module.memories.push(memory.into()); } } @@ -767,14 +765,8 @@ and for re-adding support for interface types you can see this issue: self.flag_func_escaped(func_index); func_index }), - EntityType::Table(ty) => { - let plan = TablePlan::for_table(ty, &self.tunables); - EntityIndex::Table(self.result.module.table_plans.push(plan)) - } - EntityType::Memory(ty) => { - let plan = MemoryPlan::for_memory(ty, &self.tunables); - EntityIndex::Memory(self.result.module.memory_plans.push(plan)) - } + EntityType::Table(ty) => EntityIndex::Table(self.result.module.tables.push(ty)), + EntityType::Memory(ty) => EntityIndex::Memory(self.result.module.memories.push(ty)), EntityType::Global(ty) => EntityIndex::Global(self.result.module.globals.push(ty)), EntityType::Tag(_) => unimplemented!(), } @@ -924,8 +916,8 @@ impl ModuleTranslation<'_> { // wasm module. 
segments: Vec<(usize, StaticMemoryInitializer)>, } - let mut info = PrimaryMap::with_capacity(self.module.memory_plans.len()); - for _ in 0..self.module.memory_plans.len() { + let mut info = PrimaryMap::with_capacity(self.module.memories.len()); + for _ in 0..self.module.memories.len() { info.push(Memory { data_size: 0, min_addr: u64::MAX, @@ -944,16 +936,11 @@ impl ModuleTranslation<'_> { &mut self, memory_index: MemoryIndex, ) -> Result { - self.module.memory_plans[memory_index] - .memory - .minimum_byte_size() + self.module.memories[memory_index].minimum_byte_size() } fn eval_offset(&mut self, memory_index: MemoryIndex, expr: &ConstExpr) -> Option { - match ( - expr.ops(), - self.module.memory_plans[memory_index].memory.idx_type, - ) { + match (expr.ops(), self.module.memories[memory_index].idx_type) { (&[ConstOp::I32Const(offset)], IndexType::I32) => { Some(offset.unsigned().into()) } @@ -1006,7 +993,7 @@ impl ModuleTranslation<'_> { // initializer can be created. This can be handled technically but // would require some more changes to help fix the assert elsewhere // that this protects against. - if self.module.memory_plans[i].memory.page_size() < page_size { + if self.module.memories[i].page_size() < page_size { return; } @@ -1141,19 +1128,19 @@ impl ModuleTranslation<'_> { // First convert any element-initialized tables to images of just that // single function if the minimum size of the table allows doing so. - for ((_, init), (_, plan)) in self + for ((_, init), (_, table)) in self .module .table_initialization .initial_values .iter_mut() .zip( self.module - .table_plans + .tables .iter() .skip(self.module.num_imported_tables), ) { - let table_size = plan.table.limits.min; + let table_size = table.limits.min; if table_size > MAX_FUNC_TABLE_SIZE { continue; } @@ -1206,16 +1193,12 @@ impl ModuleTranslation<'_> { Some(top) => top, None => break, }; - let table_size = self.module.table_plans[segment.table_index] - .table - .limits - .min; + let table_size = self.module.tables[segment.table_index].limits.min; if top > table_size || top > MAX_FUNC_TABLE_SIZE { break; } - match self.module.table_plans[segment.table_index] - .table + match self.module.tables[segment.table_index] .ref_type .heap_type .top() diff --git a/crates/environ/src/component/translate/inline.rs b/crates/environ/src/component/translate/inline.rs index b75b0509a326..e8c835b73a68 100644 --- a/crates/environ/src/component/translate/inline.rs +++ b/crates/environ/src/component/translate/inline.rs @@ -966,8 +966,8 @@ impl<'a> Inliner<'a> { Some(memory) => match &self.runtime_instances[memory.instance] { InstanceModule::Static(idx) => match &memory.item { ExportItem::Index(i) => { - let plan = &self.nested_modules[*idx].module.memory_plans[*i]; - match plan.memory.idx_type { + let memory = &self.nested_modules[*idx].module.memories[*i]; + match memory.idx_type { IndexType::I32 => false, IndexType::I64 => true, } diff --git a/crates/environ/src/module.rs b/crates/environ/src/module.rs index 8406eac025fc..53d3ce76d169 100644 --- a/crates/environ/src/module.rs +++ b/crates/environ/src/module.rs @@ -7,101 +7,99 @@ use core::ops::Range; use cranelift_entity::{packed_option::ReservedValue, EntityRef}; use serde_derive::{Deserialize, Serialize}; -/// Implementation styles for WebAssembly linear memory. -#[derive(Debug, Clone, Hash, Serialize, Deserialize)] -pub enum MemoryStyle { - /// The actual memory can be resized and moved. - Dynamic { - /// Extra space to reserve when a memory must be moved due to growth. 
- reserve: u64, - }, - /// Address space is allocated up front. - Static { - /// The number of bytes which are reserved for this linear memory. Only - /// the lower bytes which represent the actual linear memory need be - /// mapped, but other bytes must be guaranteed to be unmapped. - byte_reservation: u64, - }, -} - -impl MemoryStyle { - /// Decide on an implementation style for the given `Memory`. - pub fn for_memory(memory: Memory, tunables: &Tunables) -> (Self, u64) { - let is_static = - // Ideally we would compare against (an upper bound on) the target's - // page size, but unfortunately that is a little hard to plumb - // through here. - memory.page_size_log2 >= Memory::DEFAULT_PAGE_SIZE_LOG2 - && tunables.signals_based_traps - && match memory.maximum_byte_size() { - Ok(mut maximum) => { - if tunables.static_memory_bound_is_maximum { - maximum = maximum.min(tunables.static_memory_reservation); - } - - // Ensure the minimum is less than the maximum; the minimum might exceed the maximum - // when the memory is artificially bounded via `static_memory_bound_is_maximum` above - memory.minimum_byte_size().unwrap() <= maximum - && maximum <= tunables.static_memory_reservation - } - - // If the maximum size of this memory is not representable with - // `u64` then use the `static_memory_bound_is_maximum` to indicate - // whether it's a static memory or not. It should be ok to discard - // the linear memory's maximum size here as growth to the maximum is - // always fallible and never guaranteed. - Err(_) => tunables.static_memory_bound_is_maximum, - }; - - if is_static { - return ( - Self::Static { - byte_reservation: tunables.static_memory_reservation, - }, - tunables.static_memory_offset_guard_size, - ); - } - - // Otherwise, make it dynamic. - ( - Self::Dynamic { - reserve: tunables.dynamic_memory_growth_reserve, - }, - tunables.dynamic_memory_offset_guard_size, - ) - } -} - -/// A WebAssembly linear memory description along with our chosen style for -/// implementing it. -#[derive(Debug, Clone, Hash, Serialize, Deserialize)] -pub struct MemoryPlan { - /// The WebAssembly linear memory description. - pub memory: Memory, - /// Our chosen implementation style. - pub style: MemoryStyle, - /// Chosen size of a guard page before the linear memory allocation. - pub pre_guard_size: u64, - /// Our chosen offset-guard size. - pub offset_guard_size: u64, -} - -impl MemoryPlan { - /// Draw up a plan for implementing a `Memory`. - pub fn for_memory(memory: Memory, tunables: &Tunables) -> Self { - let (style, offset_guard_size) = MemoryStyle::for_memory(memory, tunables); - Self { - memory, - style, - offset_guard_size, - pre_guard_size: if tunables.guard_before_linear_memory { - offset_guard_size - } else { - 0 - }, - } - } -} +// /// Implementation styles for WebAssembly linear memory. +// #[derive(Debug, Clone, Hash, Serialize, Deserialize)] +// pub enum MemoryStyle { +// /// The actual memory can be resized and moved. +// Dynamic { +// /// Extra space to reserve when a memory must be moved due to growth. +// reserve: u64, +// }, +// /// Address space is allocated up front. +// Static { +// /// The number of bytes which are reserved for this linear memory. Only +// /// the lower bytes which represent the actual linear memory need be +// /// mapped, but other bytes must be guaranteed to be unmapped. +// byte_reservation: u64, +// }, +// } + +// impl MemoryStyle { +// /// Decide on an implementation style for the given `Memory`. 
+// pub fn for_memory(memory: Memory, tunables: &Tunables) -> (Self, u64) { +// let is_static = +// // Ideally we would compare against (an upper bound on) the target's +// // page size, but unfortunately that is a little hard to plumb +// // through here. +// memory.page_size_log2 >= Memory::DEFAULT_PAGE_SIZE_LOG2 +// && tunables.signals_based_traps +// && match memory.maximum_byte_size() { +// Ok(mut maximum) => { +// if tunables.static_memory_bound_is_maximum { +// maximum = maximum.min(tunables.static_memory_reservation); +// } + +// // Ensure the minimum is less than the maximum; the minimum might exceed the maximum +// // when the memory is artificially bounded via `static_memory_bound_is_maximum` above +// memory.minimum_byte_size().unwrap() <= maximum +// && maximum <= tunables.static_memory_reservation +// } + +// // If the maximum size of this memory is not representable with +// // `u64` then use the `static_memory_bound_is_maximum` to indicate +// // whether it's a static memory or not. It should be ok to discard +// // the linear memory's maximum size here as growth to the maximum is +// // always fallible and never guaranteed. +// Err(_) => tunables.static_memory_bound_is_maximum, +// }; + +// if is_static { +// return ( +// Self::Static { +// byte_reservation: tunables.static_memory_reservation, +// }, +// tunables.static_memory_offset_guard_size, +// ); +// } + +// // Otherwise, make it dynamic. +// ( +// Self::Dynamic { +// reserve: tunables.dynamic_memory_growth_reserve, +// }, +// tunables.dynamic_memory_offset_guard_size, +// ) +// } +// } + +// /// A WebAssembly linear memory description along with our chosen style for +// /// implementing it. +// #[derive(Debug, Clone, Hash, Serialize, Deserialize)] +// pub struct MemoryPlan { +// /// The WebAssembly linear memory description. +// pub memory: Memory, +// /// Chosen size of a guard page before the linear memory allocation. +// pub pre_guard_size: u64, +// /// Our chosen offset-guard size. +// pub offset_guard_size: u64, +// } + +// impl MemoryPlan { +// /// Draw up a plan for implementing a `Memory`. +// pub fn for_memory(memory: Memory, tunables: &Tunables) -> Self { +// let (style, offset_guard_size) = MemoryStyle::for_memory(memory, tunables); +// Self { +// memory, +// style, +// offset_guard_size, +// pre_guard_size: if tunables.guard_before_linear_memory { +// offset_guard_size +// } else { +// 0 +// }, +// } +// } +// } /// A WebAssembly linear memory initializer. #[derive(Clone, Debug, Serialize, Deserialize)] @@ -310,43 +308,43 @@ pub trait InitMemory { fn write(&mut self, memory_index: MemoryIndex, init: &StaticMemoryInitializer) -> bool; } -/// Implementation styles for WebAssembly tables. -#[derive(Debug, Clone, Hash, Serialize, Deserialize)] -pub enum TableStyle { - /// Signatures are stored in the table and checked in the caller. - CallerChecksSignature { - /// Whether this table is initialized lazily and requires an - /// initialization check on every access. - lazy_init: bool, - }, -} - -impl TableStyle { - /// Decide on an implementation style for the given `Table`. - pub fn for_table(_table: Table, tunables: &Tunables) -> Self { - Self::CallerChecksSignature { - lazy_init: tunables.table_lazy_init, - } - } -} - -/// A WebAssembly table description along with our chosen style for -/// implementing it. -#[derive(Debug, Clone, Hash, Serialize, Deserialize)] -pub struct TablePlan { - /// The WebAssembly table description. - pub table: Table, - /// Our chosen implementation style. 
- pub style: TableStyle, -} - -impl TablePlan { - /// Draw up a plan for implementing a `Table`. - pub fn for_table(table: Table, tunables: &Tunables) -> Self { - let style = TableStyle::for_table(table, tunables); - Self { table, style } - } -} +// /// Implementation styles for WebAssembly tables. +// #[derive(Debug, Clone, Hash, Serialize, Deserialize)] +// pub enum TableStyle { +// /// Signatures are stored in the table and checked in the caller. +// CallerChecksSignature { +// /// Whether this table is initialized lazily and requires an +// /// initialization check on every access. +// lazy_init: bool, +// }, +// } + +// impl TableStyle { +// /// Decide on an implementation style for the given `Table`. +// pub fn for_table(_table: Table, tunables: &Tunables) -> Self { +// Self::CallerChecksSignature { +// lazy_init: tunables.table_lazy_init, +// } +// } +// } + +// /// A WebAssembly table description along with our chosen style for +// /// implementing it. +// #[derive(Debug, Clone, Hash, Serialize, Deserialize)] +// pub struct TablePlan { +// /// The WebAssembly table description. +// pub table: Table, +// /// Our chosen implementation style. +// pub style: TableStyle, +// } + +// impl TablePlan { +// /// Draw up a plan for implementing a `Table`. +// pub fn for_table(table: Table, tunables: &Tunables) -> Self { +// let style = TableStyle::for_table(table, tunables); +// Self { table, style } +// } +// } /// Table initialization data for all tables in the module. #[derive(Debug, Default, Serialize, Deserialize)] @@ -481,10 +479,10 @@ pub struct Module { pub functions: PrimaryMap, /// WebAssembly tables. - pub table_plans: PrimaryMap, + pub tables: PrimaryMap, /// WebAssembly linear memory plans. - pub memory_plans: PrimaryMap, + pub memories: PrimaryMap, /// WebAssembly global variables. pub globals: PrimaryMap, @@ -589,7 +587,7 @@ impl Module { #[inline] pub fn owned_memory_index(&self, memory: DefinedMemoryIndex) -> OwnedMemoryIndex { assert!( - memory.index() < self.memory_plans.len(), + memory.index() < self.memories.len(), "non-shared memory must have an owned index" ); @@ -597,11 +595,11 @@ impl Module { // plans, we can iterate through the plans up to the memory index and // count how many are not shared (i.e., owned). let owned_memory_index = self - .memory_plans + .memories .iter() .skip(self.num_imported_memories) .take(memory.index()) - .filter(|(_, mp)| !mp.memory.shared) + .filter(|(_, mp)| !mp.shared) .count(); OwnedMemoryIndex::new(owned_memory_index) } @@ -651,8 +649,8 @@ impl Module { pub fn type_of(&self, index: EntityIndex) -> EntityType { match index { EntityIndex::Global(i) => EntityType::Global(self.globals[i]), - EntityIndex::Table(i) => EntityType::Table(self.table_plans[i].table), - EntityIndex::Memory(i) => EntityType::Memory(self.memory_plans[i].memory), + EntityIndex::Table(i) => EntityType::Table(self.tables[i]), + EntityIndex::Memory(i) => EntityType::Memory(self.memories[i]), EntityIndex::Function(i) => { EntityType::Function(EngineOrModuleTypeIndex::Module(self.functions[i].signature)) } diff --git a/crates/environ/src/tunables.rs b/crates/environ/src/tunables.rs index ddf317124a15..b005f4132486 100644 --- a/crates/environ/src/tunables.rs +++ b/crates/environ/src/tunables.rs @@ -5,21 +5,18 @@ use target_lexicon::{PointerWidth, Triple}; /// Tunable parameters for WebAssembly compilation. #[derive(Clone, Hash, Serialize, Deserialize, Debug)] pub struct Tunables { - /// For static heaps, the size in bytes of virtual memory reservation for - /// the heap. 
- pub static_memory_reservation: u64, + /// TODO + pub attempt_bounds_check_elision: bool, - /// The size in bytes of the offset guard for static heaps. - pub static_memory_offset_guard_size: u64, + /// TODO + pub guard_size: u64, - /// The size in bytes of the offset guard for dynamic heaps. - pub dynamic_memory_offset_guard_size: u64, + /// TODO + pub memory_reservation: u64, - /// The size, in bytes, of reserved memory at the end of a "dynamic" memory, - /// before the guard page, that memory can grow into. This is intended to - /// amortize the cost of `memory.grow` in the same manner that `Vec` has - /// space not in use to grow into. - pub dynamic_memory_growth_reserve: u64, + /// Whether or not linear memory allocations will have a guard region at the + /// beginning of the allocation in addition to the end. + pub guard_before_linear_memory: bool, /// Whether or not to generate native DWARF debug information. pub generate_native_debuginfo: bool, @@ -34,14 +31,6 @@ pub struct Tunables { /// Whether or not we use epoch-based interruption. pub epoch_interruption: bool, - /// Whether or not to treat the static memory bound as the maximum for - /// unbounded heaps. - pub static_memory_bound_is_maximum: bool, - - /// Whether or not linear memory allocations will have a guard region at the - /// beginning of the allocation in addition to the end. - pub guard_before_linear_memory: bool, - /// Whether to initialize tables lazily, so that instantiation is fast but /// indirect calls are a little slower. If false, tables are initialized /// eagerly from any active element segments that apply to them during @@ -99,10 +88,13 @@ impl Tunables { Tunables { // No virtual memory tricks are available on miri so make these // limits quite conservative. - static_memory_reservation: 1 << 20, - static_memory_offset_guard_size: 0, - dynamic_memory_offset_guard_size: 0, - dynamic_memory_growth_reserve: 0, + // static_memory_reservation: 1 << 20, + // static_memory_offset_guard_size: 0, + // dynamic_memory_offset_guard_size: 0, + // dynamic_memory_growth_reserve: 0, + attempt_bounds_check_elision: true, + guard_size: 0, + memory_reservation: 0, // General options which have the same defaults regardless of // architecture. @@ -110,7 +102,7 @@ impl Tunables { parse_wasm_debuginfo: true, consume_fuel: false, epoch_interruption: false, - static_memory_bound_is_maximum: false, + // static_memory_bound_is_maximum: false, guard_before_linear_memory: true, table_lazy_init: true, generate_address_map: true, @@ -127,10 +119,12 @@ impl Tunables { // For 32-bit we scale way down to 10MB of reserved memory. This // impacts performance severely but allows us to have more than a // few instances running around. - static_memory_reservation: 10 * (1 << 20), - static_memory_offset_guard_size: 0x1_0000, - dynamic_memory_offset_guard_size: 0x1_0000, - dynamic_memory_growth_reserve: 1 << 20, // 1MB + // static_memory_reservation: 10 * (1 << 20), + // static_memory_offset_guard_size: 0x1_0000, + // dynamic_memory_offset_guard_size: 0x1_0000, + // dynamic_memory_growth_reserve: 1 << 20, // 1MB + guard_size: 0x1_0000, + memory_reservation: 10 * (1 << 20), ..Tunables::default_miri() } @@ -139,26 +133,28 @@ impl Tunables { /// Returns the default set of tunables for running under a 64-bit host. pub fn default_u64() -> Tunables { Tunables { - // 64-bit has tons of address space to static memories can have 4gb - // address space reservations liberally by default, allowing us to - // help eliminate bounds checks. 
- // - // Coupled with a 2 GiB address space guard it lets us translate - // wasm offsets into x86 offsets as aggressively as we can. - static_memory_reservation: 1 << 32, - static_memory_offset_guard_size: 0x8000_0000, - - // Size in bytes of the offset guard for dynamic memories. - // - // Allocate a small guard to optimize common cases but without - // wasting too much memory. - dynamic_memory_offset_guard_size: 0x1_0000, - - // We've got lots of address space on 64-bit so use a larger - // grow-into-this area, but on 32-bit we aren't as lucky. Miri is - // not exactly fast so reduce memory consumption instead of trying - // to avoid memory movement. - dynamic_memory_growth_reserve: 2 << 30, // 2GB + //// 64-bit has tons of address space to static memories can have 4gb + //// address space reservations liberally by default, allowing us to + //// help eliminate bounds checks. + //// + //// Coupled with a 2 GiB address space guard it lets us translate + //// wasm offsets into x86 offsets as aggressively as we can. + //static_memory_reservation: 1 << 32, + //static_memory_offset_guard_size: 0x8000_0000, + + //// Size in bytes of the offset guard for dynamic memories. + //// + //// Allocate a small guard to optimize common cases but without + //// wasting too much memory. + //dynamic_memory_offset_guard_size: 0x1_0000, + + //// We've got lots of address space on 64-bit so use a larger + //// grow-into-this area, but on 32-bit we aren't as lucky. Miri is + //// not exactly fast so reduce memory consumption instead of trying + //// to avoid memory movement. + //dynamic_memory_growth_reserve: 2 << 30, // 2GB + guard_size: 0x8000_0000, + memory_reservation: 1 << 32, ..Tunables::default_miri() } diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs index 8b4accffed6d..edc1f423bf51 100644 --- a/crates/environ/src/vmoffsets.rs +++ b/crates/environ/src/vmoffsets.rs @@ -338,10 +338,10 @@ impl VMOffsets
<P>
{ /// Return a new `VMOffsets` instance, for a given pointer size. pub fn new(ptr: P, module: &Module) -> Self { let num_owned_memories = module - .memory_plans + .memories .iter() .skip(module.num_imported_memories) - .filter(|p| !p.1.memory.shared) + .filter(|p| !p.1.shared) .count() .try_into() .unwrap(); @@ -351,10 +351,8 @@ impl VMOffsets
<P>
{ num_imported_tables: cast_to_u32(module.num_imported_tables), num_imported_memories: cast_to_u32(module.num_imported_memories), num_imported_globals: cast_to_u32(module.num_imported_globals), - num_defined_tables: cast_to_u32(module.table_plans.len() - module.num_imported_tables), - num_defined_memories: cast_to_u32( - module.memory_plans.len() - module.num_imported_memories, - ), + num_defined_tables: cast_to_u32(module.tables.len() - module.num_imported_tables), + num_defined_memories: cast_to_u32(module.memories.len() - module.num_imported_memories), num_owned_memories, num_defined_globals: cast_to_u32(module.globals.len() - module.num_imported_globals), num_escaped_funcs: cast_to_u32(module.num_escaped_funcs),
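
The recurring theme in this patch is that decisions previously baked into MemoryPlan/MemoryStyle, TablePlan/TableStyle, and HeapStyle at module-translation time are now made at each use site from the plain Memory/Table description plus Tunables. Below is a minimal standalone Rust sketch of those decisions, using simplified stand-in types rather than the actual wasmtime-environ/cranelift definitions; only the names memory_reservation, guard_size, signals_based_traps, table_lazy_init, and FUNCREF_INIT_BIT correspond to identifiers in the diff, and the constant's concrete value is assumed for illustration.

// Standalone sketch: simplified stand-ins, not the actual wasmtime types.
#[derive(Clone, Copy)]
struct Tunables {
    memory_reservation: u64,
    guard_size: u64,
    signals_based_traps: bool,
}

#[derive(Clone, Copy)]
struct Memory {
    maximum_byte_size: Option<u64>,
}

/// Mirrors the new logic in `make_heap`: the load of the heap base pointer
/// may be marked readonly (and e.g. hoisted out of loops) only when the
/// memory's maximum size fits within the per-memory reservation, i.e. the
/// base can never move at runtime.
fn base_is_readonly(memory: Memory, tunables: &Tunables) -> bool {
    match memory.maximum_byte_size {
        Some(max) => max <= tunables.memory_reservation,
        None => false,
    }
}

/// Mirrors the `can_elide_bounds_check` gate in bounds_checks.rs: an explicit
/// bounds check may be skipped only when signal-based traps are in use and
/// the access's static offset-plus-size stays within the guard region.
fn can_skip_explicit_check(offset_and_size: u64, tunables: &Tunables) -> bool {
    tunables.signals_based_traps && offset_and_size <= tunables.guard_size
}

/// Assumed value for illustration; the real constant lives in
/// crates/environ/src/ref_bits.rs.
const FUNCREF_INIT_BIT: u64 = 1;

/// Mirrors the table-set translation: tag stored funcrefs as "initialized"
/// only when tables are lazily initialized, now keyed off
/// `Tunables::table_lazy_init` rather than
/// `TableStyle::CallerChecksSignature { lazy_init }`.
fn encode_funcref(ptr: u64, table_lazy_init: bool) -> u64 {
    if table_lazy_init {
        ptr | FUNCREF_INIT_BIT
    } else {
        ptr
    }
}

fn main() {
    let tunables = Tunables {
        memory_reservation: 1 << 32,
        guard_size: 1 << 31,
        signals_based_traps: true,
    };
    assert!(base_is_readonly(Memory { maximum_byte_size: Some(1 << 20) }, &tunables));
    assert!(!base_is_readonly(Memory { maximum_byte_size: None }, &tunables));
    assert!(can_skip_explicit_check(8, &tunables));
    assert!(!can_skip_explicit_check(u64::MAX, &tunables));
    assert_eq!(encode_funcref(0x1000, true), 0x1001);
    assert_eq!(encode_funcref(0x1000, false), 0x1000);
}

In the diff itself these correspond to the `max <= self.tunables.memory_reservation` readonly-flag check in `make_heap`, the `can_elide_bounds_check` computation at the top of bounds_checks.rs, and the `FUNCREF_INIT_BIT` or-ing driven by `self.tunables.table_lazy_init` in the table-set translation.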