Commit: wip

alexcrichton committed Nov 1, 2024
1 parent 288c151 commit 353f4b3
Showing 36 changed files with 884 additions and 1,332 deletions.
60 changes: 42 additions & 18 deletions crates/cli-flags/src/lib.rs
@@ -42,22 +42,16 @@ wasmtime_option_group! {
/// Optimization level of generated code (0-2, s; default: 2)
pub opt_level: Option<wasmtime::OptLevel>,

/// Byte size of the guard region after dynamic memories are allocated
pub dynamic_memory_guard_size: Option<u64>,

/// Force using a "static" style for all wasm memories
///
/// This option is deprecated and may be removed in the future.
pub static_memory_forced: Option<bool>,

/// Maximum size in bytes of wasm memory before it becomes dynamically
/// relocatable instead of up-front-reserved.
pub static_memory_maximum_size: Option<u64>,

/// Byte size of the guard region after static memories are allocated
pub static_memory_guard_size: Option<u64>,
/// Byte size of the guard region placed after each linear memory
pub guard_size: Option<u64>,

/// Bytes to reserve at the end of linear memory for growth for dynamic
/// memories.
pub dynamic_memory_reserved_for_growth: Option<u64>,
/// Size in bytes of the address-space reservation for each linear memory
pub memory_reservation: Option<u64>,

/// Indicates whether an unmapped region of memory is placed before all
/// linear memories.
@@ -167,6 +161,28 @@

/// Enable or disable the use of host signal handlers for traps.
pub signals_based_traps: Option<bool>,

/// [DEPRECATED] Maximum size in bytes of wasm memory before it becomes
/// dynamically relocatable instead of up-front-reserved.
///
/// Use `-O memory-reservation=N` instead
pub static_memory_maximum_size: Option<u64>,

/// [DEPRECATED] Bytes to reserve at the end of linear memory for growth
/// for dynamic memories.
///
/// Use `-O memory-reservation=N` instead
pub dynamic_memory_reserved_for_growth: Option<u64>,

/// [DEPRECATED] Byte size of the guard region after dynamic memories are allocated
///
Use `-O guard-size=N` instead
pub dynamic_memory_guard_size: Option<u64>,

/// [DEPRECATED] Byte size of the guard region after static memories are allocated
///
Use `-O guard-size=N` instead
pub static_memory_guard_size: Option<u64>,
}

enum Optimize {
@@ -629,22 +645,30 @@ impl CommonOptions {
}

if let Some(max) = self.opts.static_memory_maximum_size {
config.static_memory_maximum_size(max);
config.memory_reservation(max);
}
if let Some(size) = self.opts.dynamic_memory_reserved_for_growth {
config.memory_reservation(size);
}
if let Some(max) = self.opts.memory_reservation {
config.memory_reservation(max);
}

if let Some(enable) = self.opts.static_memory_forced {
config.static_memory_forced(enable);
let _ = enable;
todo!()
// config.static_memory_forced(enable);
}

if let Some(size) = self.opts.static_memory_guard_size {
config.static_memory_guard_size(size);
config.guard_size(size);
}

if let Some(size) = self.opts.dynamic_memory_guard_size {
config.dynamic_memory_guard_size(size);
config.guard_size(size);
}
if let Some(size) = self.opts.dynamic_memory_reserved_for_growth {
config.dynamic_memory_reserved_for_growth(size);
if let Some(size) = self.opts.guard_size {
config.guard_size(size);
}
if let Some(enable) = self.opts.guard_before_linear_memory {
config.guard_before_linear_memory(enable);
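For embedders, the same consolidation applies to `wasmtime::Config`: the separate static/dynamic knobs collapse into the `memory_reservation` and `guard_size` methods this hunk calls. Below is a minimal sketch of configuring them directly, assuming the method names from this in-progress commit (the released API may differ):

```rust
use wasmtime::{Config, Engine, Result};

fn main() -> Result<()> {
    let mut config = Config::new();

    // One address-space reservation per linear memory; a memory whose
    // declared maximum fits inside this never has to move when it grows.
    config.memory_reservation(1 << 32); // 4 GiB

    // One guard region after every memory, replacing the old
    // static/dynamic guard-size split.
    config.guard_size(32 << 20); // 32 MiB

    let _engine = Engine::new(&config)?;
    Ok(())
}
```

Note also the ordering in `CommonOptions::config` above: the deprecated CLI options are applied first and `memory-reservation`/`guard-size` afterwards, so the new flags win when both are supplied.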
191 changes: 31 additions & 160 deletions crates/cranelift/src/func_environ.rs
@@ -1,6 +1,6 @@
use crate::translate::{
FuncEnvironment as _, FuncTranslationState, GlobalVariable, Heap, HeapData, HeapStyle,
StructFieldsVec, TableData, TableSize, TargetEnvironment,
FuncEnvironment as _, FuncTranslationState, GlobalVariable, Heap, HeapData, StructFieldsVec,
TableData, TableSize, TargetEnvironment,
};
use crate::{gc, BuiltinFunctionSignatures, TRAP_INTERNAL_ASSERT};
use cranelift_codegen::cursor::FuncCursor;
@@ -20,10 +20,10 @@ use std::mem;
use wasmparser::{Operator, WasmFeatures};
use wasmtime_environ::{
BuiltinFunctionIndex, DataIndex, ElemIndex, EngineOrModuleTypeIndex, FuncIndex, GlobalIndex,
IndexType, Memory, MemoryIndex, MemoryStyle, Module, ModuleInternedTypeIndex,
ModuleTranslation, ModuleTypesBuilder, PtrSize, Table, TableIndex, Tunables, TypeConvert,
TypeIndex, VMOffsets, WasmCompositeType, WasmFuncType, WasmHeapTopType, WasmHeapType,
WasmRefType, WasmResult, WasmValType,
IndexType, Memory, MemoryIndex, Module, ModuleInternedTypeIndex, ModuleTranslation,
ModuleTypesBuilder, PtrSize, Table, TableIndex, Tunables, TypeConvert, TypeIndex, VMOffsets,
WasmCompositeType, WasmFuncType, WasmHeapTopType, WasmHeapType, WasmRefType, WasmResult,
WasmValType,
};
use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK};

@@ -1695,6 +1695,10 @@ impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environm
fn proof_carrying_code(&self) -> bool {
self.isa.flags().enable_pcc()
}

fn tunables(&self) -> &Tunables {
self.tunables
}
}

impl<'module_environment> crate::translate::FuncEnvironment
@@ -2305,18 +2309,6 @@ impl<'module_environment> crate::translate::FuncEnvironment
let memory = self.module.memories[index];
let is_shared = memory.shared;

let min_size = memory.minimum_byte_size().unwrap_or_else(|_| {
// The only valid Wasm memory size that won't fit in a 64-bit
// integer is the maximum memory64 size (2^64) which is one
// larger than `u64::MAX` (2^64 - 1). In this case, just say the
// minimum heap size is `u64::MAX`.
debug_assert_eq!(memory.limits.min, 1 << 48);
debug_assert_eq!(memory.page_size(), 1 << 16);
u64::MAX
});

let max_size = memory.maximum_byte_size().ok();

let (ptr, base_offset, current_length_offset, ptr_memtype) = {
let vmctx = self.vmctx(func);
if let Some(def_index) = self.module.defined_memory_index(index) {
@@ -2369,160 +2361,39 @@
}
};

let page_size_log2 = memory.page_size_log2;

// If we have a declared maximum, we can make this a "static" heap, which is
// allocated up front and never moved.
let (style, offset_guard_size) = MemoryStyle::for_memory(memory, self.tunables);
let (heap_style, readonly_base, base_fact, memory_type) = match style {
MemoryStyle::Dynamic { .. } => {
let heap_bound = func.create_global_value(ir::GlobalValueData::Load {
base: ptr,
offset: Offset32::new(current_length_offset),
global_type: pointer_type,
flags: MemFlags::trusted(),
});

let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype {
// Create a memtype representing the untyped memory region.
let data_mt = func.create_memory_type(ir::MemoryTypeData::DynamicMemory {
gv: heap_bound,
size: offset_guard_size,
});
// This fact applies to any pointer to the start of the memory.
let base_fact = ir::Fact::dynamic_base_ptr(data_mt);
// This fact applies to the length.
let length_fact = ir::Fact::global_value(
u16::try_from(self.isa.pointer_type().bits()).unwrap(),
heap_bound,
);
// Create a field in the vmctx for the base pointer.
match &mut func.memory_types[ptr_memtype] {
ir::MemoryTypeData::Struct { size, fields } => {
let base_offset = u64::try_from(base_offset).unwrap();
fields.push(ir::MemoryTypeField {
offset: base_offset,
ty: self.isa.pointer_type(),
// Read-only field from the PoV of PCC checks:
// don't allow stores to this field. (Even if
// it is a dynamic memory whose base can
// change, that update happens inside the
// runtime, not in generated code.)
readonly: true,
fact: Some(base_fact.clone()),
});
let current_length_offset =
u64::try_from(current_length_offset).unwrap();
fields.push(ir::MemoryTypeField {
offset: current_length_offset,
ty: self.isa.pointer_type(),
// As above, read-only; only the runtime modifies it.
readonly: true,
fact: Some(length_fact),
});

let pointer_size = u64::from(self.isa.pointer_type().bytes());
let fields_end = std::cmp::max(
base_offset + pointer_size,
current_length_offset + pointer_size,
);
*size = std::cmp::max(*size, fields_end);
}
_ => {
panic!("Bad memtype");
}
}
// Apply a fact to the base pointer.
(Some(base_fact), Some(data_mt))
} else {
(None, None)
};

(
HeapStyle::Dynamic {
bound_gv: heap_bound,
},
false,
base_fact,
data_mt,
)
}
MemoryStyle::Static {
byte_reservation: bound_bytes,
} => {
let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype {
// Create a memtype representing the untyped memory region.
let data_mt = func.create_memory_type(ir::MemoryTypeData::Memory {
size: bound_bytes
.checked_add(offset_guard_size)
.expect("Memory plan has overflowing size plus guard"),
});
// This fact applies to any pointer to the start of the memory.
let base_fact = Fact::Mem {
ty: data_mt,
min_offset: 0,
max_offset: 0,
nullable: false,
};
// Create a field in the vmctx for the base pointer.
match &mut func.memory_types[ptr_memtype] {
ir::MemoryTypeData::Struct { size, fields } => {
let offset = u64::try_from(base_offset).unwrap();
fields.push(ir::MemoryTypeField {
offset,
ty: self.isa.pointer_type(),
// Read-only field from the PoV of PCC checks:
// don't allow stores to this field. (Even if
// it is a dynamic memory whose base can
// change, that update happens inside the
// runtime, not in generated code.)
readonly: true,
fact: Some(base_fact.clone()),
});
*size = std::cmp::max(
*size,
offset + u64::from(self.isa.pointer_type().bytes()),
);
}
_ => {
panic!("Bad memtype");
}
}
// Apply a fact to the base pointer.
(Some(base_fact), Some(data_mt))
} else {
(None, None)
};
(
HeapStyle::Static { bound: bound_bytes },
true,
base_fact,
data_mt,
)
}
};
let _ = ptr_memtype;

// If the maximum byte size of this memory is less than or equal to the
// configured memory reservation for each memory then that means that
// the base pointer won't ever change at runtime. In this situation the
// load of the base pointer can be readonly and, for example, hoisted
// out of loops.
let mut flags = MemFlags::trusted().with_checked();
if readonly_base {
flags.set_readonly();
if let Ok(max) = memory.maximum_byte_size() {
if max <= self.tunables.memory_reservation {
flags.set_readonly();
}
}
let heap_base = func.create_global_value(ir::GlobalValueData::Load {
base: ptr,
offset: Offset32::new(base_offset),
global_type: pointer_type,
flags,
});
func.global_value_facts[heap_base] = base_fact;
// TODO
// func.global_value_facts[heap_base] = base_fact;
let bound_gv = func.create_global_value(ir::GlobalValueData::Load {
base: ptr,
offset: Offset32::new(current_length_offset),
global_type: pointer_type,
flags: MemFlags::trusted(),
});

Ok(self.heaps.push(HeapData {
base: heap_base,
min_size,
max_size,
offset_guard_size,
style: heap_style,
index_type: index_type_to_ir_type(self.memory(index).idx_type),
memory_type,
page_size_log2,
bound_gv,
memory_type: None,
ty: memory,
}))
}

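The net effect of this hunk is that the old `MemoryStyle::Dynamic`/`MemoryStyle::Static` fork disappears: every memory now loads its base and bound from the vmctx, and the base load is merely marked readonly (and thus hoistable out of loops) when the memory can never outgrow its reservation. A distilled sketch of that one remaining decision, using stand-in types rather than the real `wasmtime-environ` ones:

```rust
/// Stand-in for the subset of `wasmtime_environ::Memory` used here.
struct MemoryInfo {
    /// Declared maximum size in bytes, if the module specified one.
    maximum_byte_size: Option<u64>,
}

/// Stand-in for the relevant tunable.
struct Tunables {
    /// Bytes of address space reserved up front for each linear memory.
    memory_reservation: u64,
}

/// The base-pointer load can be readonly (and hoisted out of loops) only
/// when growth can never force the runtime to relocate the memory.
fn base_load_is_readonly(memory: &MemoryInfo, tunables: &Tunables) -> bool {
    match memory.maximum_byte_size {
        Some(max) => max <= tunables.memory_reservation,
        None => false,
    }
}

fn main() {
    let tunables = Tunables { memory_reservation: 1 << 32 };
    // A 64 KiB-max memory fits in a 4 GiB reservation: the base never moves.
    assert!(base_load_is_readonly(
        &MemoryInfo { maximum_byte_size: Some(1 << 16) },
        &tunables
    ));
    // No declared maximum: the memory may grow past the reservation and be
    // relocated, so the base must be reloaded on each access path.
    assert!(!base_load_is_readonly(
        &MemoryInfo { maximum_byte_size: None },
        &tunables
    ));
}
```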
16 changes: 9 additions & 7 deletions crates/cranelift/src/translate/code_translator.rs
@@ -93,8 +93,8 @@ use std::collections::{hash_map, HashMap};
use std::vec::Vec;
use wasmparser::{FuncValidator, MemArg, Operator, WasmModuleResources};
use wasmtime_environ::{
wasm_unsupported, DataIndex, ElemIndex, FuncIndex, GlobalIndex, MemoryIndex, TableIndex,
TypeIndex, WasmRefType, WasmResult,
wasm_unsupported, DataIndex, ElemIndex, FuncIndex, GlobalIndex, MemoryIndex, Signed,
TableIndex, TypeIndex, WasmRefType, WasmResult,
};

/// Given a `Reachability<T>`, unwrap the inner `T` or, when unreachable, set
@@ -1255,8 +1255,8 @@ pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
let effective_addr = if memarg.offset == 0 {
addr
} else {
let index_type = environ.heaps()[heap].index_type;
let offset = builder.ins().iconst(index_type, memarg.offset as i64);
let index_type = environ.heaps()[heap].index_type();
let offset = builder.ins().iconst(index_type, memarg.offset.signed());
environ.uadd_overflow_trap(builder, addr, offset, ir::TrapCode::HEAP_OUT_OF_BOUNDS)
};
// `fn translate_atomic_wait` can inspect the type of `expected` to figure out what
@@ -1279,8 +1279,8 @@
let effective_addr = if memarg.offset == 0 {
addr
} else {
let index_type = environ.heaps()[heap].index_type;
let offset = builder.ins().iconst(index_type, memarg.offset as i64);
let index_type = environ.heaps()[heap].index_type();
let offset = builder.ins().iconst(index_type, memarg.offset.signed());
environ.uadd_overflow_trap(builder, addr, offset, ir::TrapCode::HEAP_OUT_OF_BOUNDS)
};
let res = environ.translate_atomic_notify(
@@ -3223,7 +3223,9 @@
// relatively odd/rare. In the future if needed we can look into
// optimizing this more.
Err(_) => {
let offset = builder.ins().iconst(heap.index_type, memarg.offset as i64);
let offset = builder
.ins()
.iconst(heap.index_type(), memarg.offset.signed());
let adjusted_index = environ.uadd_overflow_trap(
builder,
index,
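The `code_translator.rs` changes route the `memarg.offset` addend through the heap's accessor (`index_type()`) and a sign-preserving conversion (`offset.signed()`), but the addressing rule itself is unchanged: a nonzero static offset is added to the dynamic address with an overflow check that traps as out-of-bounds. A plain-Rust sketch of that semantics, with `checked_add` standing in for `uadd_overflow_trap`:

```rust
/// Sketch of the effective-address rule the translator emits: the add of
/// `addr + memarg.offset` must trap if it overflows the index type, rather
/// than wrapping around into an in-bounds address.
fn effective_addr(addr: u64, offset: u64) -> Result<u64, &'static str> {
    // `checked_add` plays the role of `uadd_overflow_trap` with
    // `TrapCode::HEAP_OUT_OF_BOUNDS`.
    addr.checked_add(offset).ok_or("heap out of bounds")
}

fn main() {
    assert_eq!(effective_addr(0x1000, 8), Ok(0x1008));
    // Wrapping the 64-bit index space must trap, not alias low memory.
    assert!(effective_addr(u64::MAX, 1).is_err());
}
```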