Don't use MemoryStyle for heap base pointer relocations
Instead, add a helper method to `Memory` which indicates whether the base
pointer of a memory can move. Use this and plumb the result around to the
various locations that require it. This improves the application of the
`readonly` flag to the base pointer in Cranelift by having the optimization
kick in in more scenarios. It additionally fixes combining shared linear
memories with 64-bit addressing or a page size of 1 by adding a runtime
check, before a linear memory is relocated, that it is allowed to do so.

A few codegen tests are added to ensure that `readonly` is applied where it
wasn't previously, and a new `*.wast` test is added covering the cross
product of various linear-memory features with some basic tests to ensure
that the memories all work as expected.

This refactoring fixes two preexisting issues about `shared` memories
requiring a "static" memory style. The restriction is now based on whether
the pointer can relocate or not, and that invariant is upheld via the new
trait method added here.

To account for these bug fixes, the fuzzers have been updated to allow
mixing the `threads` proposal with `memory64` or `custom-page-sizes`.

Closes #4267
Closes #9523
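
As a rough sketch of the embedder-visible effect of these fixes (an illustration, not code from this commit, and assuming the `MemoryType::shared64` constructor): a `shared` memory combined with 64-bit addressing can now be created, since the requirement is no longer a "static" memory style but merely that the base pointer never move, which shared memories always guarantee.

```rust
use wasmtime::{Config, Engine, MemoryType, Result, SharedMemory};

fn main() -> Result<()> {
    let mut config = Config::new();
    config.wasm_threads(true);
    config.wasm_memory64(true);
    let engine = Engine::new(&config)?;

    // A shared 64-bit memory with 1 initial and 10 maximum pages.
    // Previously this combination could be rejected because shared memories
    // demanded a "static" MemoryStyle; now it only requires an immovable
    // base pointer.
    let _memory = SharedMemory::new(&engine, MemoryType::shared64(1, 10))?;
    Ok(())
}
```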
alexcrichton committed Nov 5, 2024
1 parent cc6cf63 commit cb16230
Showing 13 changed files with 500 additions and 40 deletions.
8 changes: 4 additions & 4 deletions crates/cranelift/src/func_environ.rs
@@ -2371,7 +2371,7 @@ impl<'module_environment> crate::translate::FuncEnvironment
// If we have a declared maximum, we can make this a "static" heap, which is
// allocated up front and never moved.
let style = MemoryStyle::for_memory(memory, self.tunables);
- let (readonly_base, base_fact, memory_type) = match style {
+ let (base_fact, memory_type) = match style {
MemoryStyle::Dynamic { .. } => {
let (base_fact, data_mt) = if let Some(ptr_memtype) = ptr_memtype {
// Create a memtype representing the untyped memory region.
@@ -2428,7 +2428,7 @@ impl<'module_environment> crate::translate::FuncEnvironment
(None, None)
};

- (false, base_fact, data_mt)
+ (base_fact, data_mt)
}
MemoryStyle::Static {
byte_reservation: bound_bytes,
@@ -2476,12 +2476,12 @@ impl<'module_environment> crate::translate::FuncEnvironment
} else {
(None, None)
};
- (true, base_fact, data_mt)
+ (base_fact, data_mt)
}
};

let mut flags = MemFlags::trusted().with_checked();
- if readonly_base {
+ if !memory.memory_may_move(self.tunables) {
flags.set_readonly();
}
let heap_base = func.create_global_value(ir::GlobalValueData::Load {
28 changes: 27 additions & 1 deletion crates/environ/src/types.rs
@@ -1,4 +1,4 @@
- use crate::{wasm_unsupported, WasmResult};
+ use crate::{wasm_unsupported, Tunables, WasmResult};
use alloc::borrow::Cow;
use alloc::boxed::Box;
use core::{fmt, ops::Range};
@@ -1810,6 +1810,32 @@ impl Memory {
None
}
}

+ /// Returns whether or not the base pointer of this memory is allowed to be
+ /// relocated at runtime.
+ ///
+ /// When this function returns `false` then it means that after the initial
+ /// allocation the base pointer is constant for the entire lifetime of a
+ /// memory. This can enable compiler optimizations, for example.
+ pub fn memory_may_move(&self, tunables: &Tunables) -> bool {
+     // Shared memories cannot ever relocate their base pointer so the
+     // settings configured in the engine must be appropriate for them ahead
+     // of time.
+     if self.shared {
+         return false;
+     }
+
+     // If movement is disallowed in engine configuration, then the answer is
+     // "no".
+     if !tunables.memory_may_move {
+         return false;
+     }
+
+     // If the maximum size of this memory is above the threshold of the
+     // initial memory reservation then the memory may move.
+     let max = self.maximum_byte_size().unwrap_or(u64::MAX);
+     max > tunables.memory_reservation
+ }
}

#[derive(Copy, Clone, Debug)]
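
To make the decision above concrete, here is a condensed, standalone restatement of `Memory::memory_may_move` with hypothetical free-standing parameters in place of the `Memory` and `Tunables` fields, plus a worked example; this is a sketch for tracing the logic by hand, not wasmtime API.

```rust
/// Illustrative restatement of the predicate above; parameter names are
/// hypothetical stand-ins for `Memory` and `Tunables` fields.
fn base_pointer_may_move(
    shared: bool,               // Memory::shared
    engine_allows_move: bool,   // Tunables::memory_may_move
    max_byte_size: Option<u64>, // Memory::maximum_byte_size()
    reservation: u64,           // Tunables::memory_reservation
) -> bool {
    // Shared memories may be aliased from other threads, so their base
    // pointer can never be relocated.
    if shared {
        return false;
    }
    // Engine configuration can forbid movement outright.
    if !engine_allows_move {
        return false;
    }
    // Otherwise the base may move only if growth could exceed the initial
    // reservation; a memory whose maximum fits never needs to relocate.
    max_byte_size.unwrap_or(u64::MAX) > reservation
}

fn main() {
    // `(memory 1 200 shared)` from the second disas test below: shared, so
    // immovable regardless of the 200-page (~13 MiB) maximum.
    assert!(!base_pointer_may_move(true, true, Some(200 * 65536), 0x20000));
    // An unshared `(memory 1)` with no maximum can outgrow any finite
    // reservation, so its base pointer may move.
    assert!(base_pointer_may_move(false, true, None, 0x20000));
}
```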
26 changes: 20 additions & 6 deletions crates/fuzzing/src/generators/config.rs
@@ -496,15 +496,20 @@ impl WasmtimeConfig {
self.debug_info = false;
}

+ // Forcibly don't use the `CustomUnaligned` memory configuration when
+ // wasm threads are enabled or when the pooling allocator is used. For
+ // the pooling allocator it doesn't use custom memory creators anyway
+ // and for wasm threads that will require some refactoring of the
+ // `LinearMemory` trait to bubble up the request that the linear memory
+ // not move. Otherwise that just generates a panic right now.
+ if config.threads_enabled || matches!(self.strategy, InstanceAllocationStrategy::Pooling(_))
+ {
+     self.avoid_custom_unaligned_memory(u)?;
+ }

// If using the pooling allocator, constrain the memory and module configurations
// to the module limits.
if let InstanceAllocationStrategy::Pooling(pooling) = &mut self.strategy {
- // Forcibly don't use the `CustomUnaligned` memory configuration
- // with the pooling allocator active.
- if let MemoryConfig::CustomUnaligned = self.memory_config {
-     self.memory_config = MemoryConfig::Normal(u.arbitrary()?);
- }

// If the pooling allocator is used, do not allow shared memory to
// be created. FIXME: see
// https://github.com/bytecodealliance/wasmtime/issues/4244.
@@ -578,6 +583,15 @@ impl WasmtimeConfig {
Ok(())
}

+ /// Helper to switch `MemoryConfig::CustomUnaligned` to
+ /// `MemoryConfig::Normal`
+ fn avoid_custom_unaligned_memory(&mut self, u: &mut Unstructured<'_>) -> arbitrary::Result<()> {
+     if let MemoryConfig::CustomUnaligned = self.memory_config {
+         self.memory_config = MemoryConfig::Normal(u.arbitrary()?);
+     }
+     Ok(())
+ }

/// Helper method to handle some dependencies between various configuration
/// options. This is intended to be called whenever a `Config` is created or
/// modified to ensure that the final result is an instantiable `Config`.
11 changes: 1 addition & 10 deletions crates/fuzzing/src/generators/module.rs
@@ -39,10 +39,7 @@ impl<'a> Arbitrary<'a> for ModuleConfig {
config.custom_page_sizes_enabled = u.arbitrary()?;
config.wide_arithmetic_enabled = u.arbitrary()?;
config.memory64_enabled = u.ratio(1, 20)?;
- // Allow the threads proposal if memory64 is not already enabled. FIXME:
- // to allow threads and memory64 to coexist, see
- // https://github.com/bytecodealliance/wasmtime/issues/4267.
- config.threads_enabled = !config.memory64_enabled && u.ratio(1, 20)?;
+ config.threads_enabled = u.ratio(1, 20)?;
// Allow multi-memory but make it unlikely
if u.ratio(1, 20)? {
config.max_memories = config.max_memories.max(2);
@@ -56,12 +53,6 @@
// do that most of the time.
config.disallow_traps = u.ratio(9, 10)?;

- // FIXME(#9523) this `if` should ideally be deleted, requires some
- // refactoring internally.
- if config.custom_page_sizes_enabled && config.threads_enabled {
-     config.custom_page_sizes_enabled = false;
- }

Ok(ModuleConfig { config })
}
}
7 changes: 7 additions & 0 deletions crates/wasmtime/src/runtime/trampoline/memory.rs
@@ -114,6 +114,13 @@ impl RuntimeLinearMemory for LinearMemoryProxy {
fn wasm_accessible(&self) -> Range<usize> {
self.mem.wasm_accessible()
}

+ fn memory_may_move(&self) -> bool {
+     // FIXME(#9568): should propagate this up to the `LinearMemory` trait,
+     // but for now pessimistically assume that consumers might move linear
+     // memory.
+     true
+ }
}

#[derive(Clone)]
54 changes: 43 additions & 11 deletions crates/wasmtime/src/runtime/vm/memory.rs
@@ -71,6 +71,10 @@ pub trait RuntimeLinearMemory: Send + Sync {
/// Returns `None` if the memory is unbounded.
fn maximum_byte_size(&self) -> Option<usize>;

+ /// Returns whether the base pointer of this memory may be relocated over
+ /// time to accommodate requests for growth.
+ fn memory_may_move(&self) -> bool;

/// Grows a memory by `delta_pages`.
///
/// This performs the necessary checks on the growth before delegating to
@@ -203,6 +207,9 @@ pub struct MmapMemory {
// specified so that the cost of repeated growth is amortized.
extra_to_reserve_on_growth: usize,

+ // Whether or not this memory is allowed to relocate the base pointer.
+ memory_may_move: bool,

// Size in bytes of extra guard pages before the start and after the end to
// optimize loads and stores with constant offsets.
pre_guard_size: usize,
@@ -305,6 +312,7 @@ impl MmapMemory {
offset_guard_size: offset_guard_bytes,
extra_to_reserve_on_growth,
memory_image,
+ memory_may_move: ty.memory_may_move(tunables),
})
}

@@ -332,13 +340,21 @@ impl RuntimeLinearMemory for MmapMemory {
self.maximum
}

+ fn memory_may_move(&self) -> bool {
+     self.memory_may_move
+ }

fn grow_to(&mut self, new_size: usize) -> Result<()> {
assert!(usize_is_multiple_of_host_page_size(self.offset_guard_size));
assert!(usize_is_multiple_of_host_page_size(self.pre_guard_size));
assert!(usize_is_multiple_of_host_page_size(self.mmap.len()));

let new_accessible = round_usize_up_to_host_pages(new_size)?;
if new_accessible > self.mmap.len() - self.offset_guard_size - self.pre_guard_size {
+ if !self.memory_may_move {
+     bail!("disallowing growth as base pointer of memory cannot move");
+ }

// If the new size of this heap exceeds the current size of the
// allocation we have, then this must be a dynamic heap. Use
// `new_size` to calculate a new size of an allocation, allocate it,
@@ -507,6 +523,10 @@ impl RuntimeLinearMemory for StaticMemory {
Some(self.capacity)
}

+ fn memory_may_move(&self) -> bool {
+     false
+ }

fn grow_to(&mut self, new_byte_size: usize) -> Result<()> {
// Never exceed the static memory size; this check should have been made
// prior to arriving here.
@@ -556,10 +576,27 @@ impl Memory {
let (minimum, maximum) = Self::limit_new(ty, Some(store))?;
let allocation = creator.new_memory(ty, tunables, minimum, maximum, memory_image)?;
let allocation = if ty.shared {
- Box::new(SharedMemory::wrap(ty, tunables, allocation)?)
+ Box::new(SharedMemory::wrap(ty, allocation)?)
} else {
allocation
};

+ // Double-check that the created memory respects the safety invariant of
+ // whether the memory may move or not at runtime.
+ //
+ // * If the memory is allowed to move, that's ok.
+ // * If the allocation doesn't allow the memory to move, that's ok.
+ // * If the heap has a static size meaning the min is the same as the
+ //   max, that's ok since it'll never be requested to move.
+ //
+ // Otherwise something went wrong so trigger an assert.
+ assert!(
+     ty.memory_may_move(tunables)
+         || !allocation.memory_may_move()
+         || ty.static_heap_size().is_some(),
+     "this linear memory should not be able to move its base pointer",
+ );

Ok(Memory(allocation))
}

@@ -652,16 +689,11 @@ impl Memory {
// informing the limiter is lossy and may not be 100% accurate, but for
// now the expected uses of limiter means that's ok.
if let Some(store) = store {
- // We ignore the store limits for shared memories since they are
- // technically not created within a store (though, trickily, they
- // may be associated with one in order to get a `vmctx`).
- if !ty.shared {
-     if !store.memory_growing(0, minimum.unwrap_or(absolute_max), maximum)? {
-         bail!(
-             "memory minimum size of {} pages exceeds memory limits",
-             ty.limits.min
-         );
-     }
+ if !store.memory_growing(0, minimum.unwrap_or(absolute_max), maximum)? {
+     bail!(
+         "memory minimum size of {} pages exceeds memory limits",
+         ty.limits.min
+     );
+ }
}

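
The new `grow_to` check above is observable through the embedding API. The following is a speculative sketch, assuming the `Config::memory_reservation` and `Config::memory_may_move` options that the `-Omemory-reservation`/`-Omemory-may-move` flags in the disas tests below correspond to: growth within the reservation succeeds in place, while growth that would require relocating the base pointer now fails with an error.

```rust
use wasmtime::{Config, Engine, Memory, MemoryType, Result, Store};

fn main() -> Result<()> {
    let mut config = Config::new();
    config.memory_reservation(0x20000); // reserve 128 KiB (2 wasm pages)
    config.memory_may_move(false);      // base pointer must never relocate
    let engine = Engine::new(&config)?;
    let mut store = Store::new(&engine, ());

    // One initial page, no declared maximum.
    let memory = Memory::new(&mut store, MemoryType::new(1, None))?;

    // Growing to 2 pages stays within the 128 KiB reservation, so the base
    // pointer is untouched and growth succeeds in place.
    memory.grow(&mut store, 1)?;

    // Growing to 3 pages would exceed the reservation and thus require
    // relocating the base pointer, which the configuration forbids.
    assert!(memory.grow(&mut store, 1).is_err());
    Ok(())
}
```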
15 changes: 9 additions & 6 deletions crates/wasmtime/src/runtime/vm/threads/shared_memory.rs
@@ -8,7 +8,7 @@ use std::ops::Range;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
- use wasmtime_environ::{MemoryStyle, Trap, Tunables};
+ use wasmtime_environ::{Trap, Tunables};

/// For shared memory (and only for shared memory), this lock-version restricts
/// access when growing the memory or checking its size. This is to conform with
@@ -33,21 +33,19 @@ impl SharedMemory {
pub fn new(ty: &wasmtime_environ::Memory, tunables: &Tunables) -> Result<Self> {
let (minimum_bytes, maximum_bytes) = Memory::limit_new(ty, None)?;
let mmap_memory = MmapMemory::new(ty, tunables, minimum_bytes, maximum_bytes, None)?;
- Self::wrap(ty, tunables, Box::new(mmap_memory))
+ Self::wrap(ty, Box::new(mmap_memory))
}

/// Wrap an existing [Memory] with the locking provided by a [SharedMemory].
pub fn wrap(
ty: &wasmtime_environ::Memory,
- tunables: &Tunables,
mut memory: Box<dyn RuntimeLinearMemory>,
) -> Result<Self> {
if !ty.shared {
bail!("shared memory must have a `shared` memory type");
}
- let style = MemoryStyle::for_memory(*ty, tunables);
- if !matches!(style, MemoryStyle::Static { .. }) {
-     bail!("shared memory can only be built from a static memory allocation")
+ if memory.memory_may_move() {
+     bail!("shared memory cannot use a memory which may relocate the base pointer")
}
assert!(
memory.as_any_mut().type_id() != std::any::TypeId::of::<SharedMemory>(),
@@ -233,4 +231,9 @@ impl RuntimeLinearMemory for SharedMemory {
fn wasm_accessible(&self) -> Range<usize> {
self.0.memory.read().unwrap().wasm_accessible()
}

+ fn memory_may_move(&self) -> bool {
+     debug_assert!(!self.0.memory.read().unwrap().memory_may_move());
+     false
+ }
}
crates/wasmtime/src/runtime/vm/threads/shared_memory_disabled.rs
@@ -12,7 +12,6 @@ pub enum SharedMemory {}
impl SharedMemory {
pub fn wrap(
_ty: &wasmtime_environ::Memory,
- _tunables: &Tunables,
_memory: Box<dyn RuntimeLinearMemory>,
) -> Result<Self> {
bail!("support for shared memories was disabled at compile time");
@@ -101,4 +100,8 @@ impl RuntimeLinearMemory for SharedMemory {
fn wasm_accessible(&self) -> Range<usize> {
match *self {}
}

+ fn memory_may_move(&self) -> bool {
+     match *self {}
+ }
}
32 changes: 32 additions & 0 deletions tests/disas/readonly-heap-base-pointer1.wat
@@ -0,0 +1,32 @@
;;! test = "optimize"
;;! target = "x86_64"
;;! flags = ["-Omemory-reservation=0x20000", "-Omemory-may-move=n"]

(module
  (memory 1)
  (func $load (param i32) (result i32)
    (i32.load (local.get 0)))
)
;; function u0:0(i64 vmctx, i64, i32) -> i32 tail {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned gv1
;; gv3 = vmctx
;; gv4 = load.i64 notrap aligned gv3+104
;; gv5 = load.i64 notrap aligned readonly checked gv3+96
;; stack_limit = gv2
;;
;; block0(v0: i64, v1: i64, v2: i32):
;; @0020 v4 = uextend.i64 v2
;; @0020 v5 = iconst.i64 0x0001_fffc
;; @0020 v6 = icmp ugt v4, v5 ; v5 = 0x0001_fffc
;; @0020 v9 = iconst.i64 0
;; @0020 v7 = load.i64 notrap aligned readonly checked v0+96
;; @0020 v8 = iadd v7, v4
;; @0020 v10 = select_spectre_guard v6, v9, v8 ; v9 = 0
;; @0020 v11 = load.i32 little heap v10
;; @0023 jump block1
;;
;; block1:
;; @0023 return v11
;; }
34 changes: 34 additions & 0 deletions tests/disas/readonly-heap-base-pointer2.wat
@@ -0,0 +1,34 @@
;;! test = "optimize"
;;! target = "x86_64"
;;! flags = ["-Omemory-reservation=0x20000"]

(module
  (memory 1 200 shared)
  (func $load (param i32) (result i32)
    (i32.load (local.get 0)))
)
;; function u0:0(i64 vmctx, i64, i32) -> i32 tail {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned gv1
;; gv3 = vmctx
;; gv4 = load.i64 notrap aligned readonly gv3+88
;; gv5 = load.i64 notrap aligned gv4+8
;; gv6 = load.i64 notrap aligned readonly checked gv4
;; stack_limit = gv2
;;
;; block0(v0: i64, v1: i64, v2: i32):
;; @0022 v12 = load.i64 notrap aligned readonly v0+88
;; @0022 v5 = load.i64 notrap aligned v12+8
;; @0022 v4 = uextend.i64 v2
;; @0022 v6 = icmp ugt v4, v5
;; @0022 v9 = iconst.i64 0
;; @0022 v7 = load.i64 notrap aligned readonly checked v12
;; @0022 v8 = iadd v7, v4
;; @0022 v10 = select_spectre_guard v6, v9, v8 ; v9 = 0
;; @0022 v11 = load.i32 little heap v10
;; @0025 jump block1
;;
;; block1:
;; @0025 return v11
;; }
