diff --git a/benches/memory_mapping.rs b/benches/memory_mapping.rs index dab6cbd5..f70efdf3 100644 --- a/benches/memory_mapping.rs +++ b/benches/memory_mapping.rs @@ -12,7 +12,7 @@ extern crate test; use rand::{rngs::SmallRng, Rng, SeedableRng}; use solana_rbpf::{ - memory_region::{AccessType, MemoryMapping, MemoryRegion}, + memory_region::{AccessType, AlignedMemoryMapping, MemoryRegion, UnalignedMemoryMapping}, user_error::UserError, vm::Config, }; @@ -23,8 +23,7 @@ fn generate_memory_regions( is_writable: bool, mut prng: Option<&mut SmallRng>, ) -> (Vec, u64) { - let mut memory_regions = Vec::with_capacity(entries + 1); - memory_regions.push(MemoryRegion::default()); + let mut memory_regions = Vec::with_capacity(entries); let mut offset = 0x100000000; for _ in 0..entries { let length = match &mut prng { @@ -55,157 +54,226 @@ fn bench_prng(bencher: &mut Bencher) { bencher.iter(|| prng.gen::()); } -#[bench] -fn bench_gapped_randomized_access_with_1024_entries(bencher: &mut Bencher) { - let frame_size: u64 = 2; - let frame_count: u64 = 1024; - let content = vec![0; (frame_size * frame_count * 2) as usize]; - let memory_regions = vec![ - MemoryRegion::default(), - MemoryRegion::new_for_testing(&content[..], 0x100000000, frame_size, false), - ]; - let config = Config::default(); - let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); - let mut prng = new_prng!(); - bencher.iter(|| { - assert!(memory_mapping - .map::( - AccessType::Load, - 0x100000000 + (prng.gen::() % frame_count * (frame_size * 2)), - 1 - ) - .is_ok()); - }); -} - -#[bench] -fn bench_randomized_access_with_0001_entry(bencher: &mut Bencher) { - let content = vec![0; 1024 * 2]; - let memory_regions = vec![ - MemoryRegion::default(), - MemoryRegion::new_readonly(&content[..], 0x100000000), - ]; - let config = Config::default(); - let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); - let mut prng = new_prng!(); - bencher.iter(|| { - let _ = memory_mapping.map::( - AccessType::Load, - 0x100000000 + (prng.gen::() % content.len() as u64), - 1, - ); - }); -} - -#[bench] -fn bench_randomized_mapping_access_with_0004_entries(bencher: &mut Bencher) { - let mut prng = new_prng!(); - let (memory_regions, end_address) = generate_memory_regions(4, false, Some(&mut prng)); - let config = Config::default(); - let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); - bencher.iter(|| { - let _ = memory_mapping.map::( - AccessType::Load, - 0x100000000 + (prng.gen::() % end_address), - 1, - ); - }); -} - -#[bench] -fn bench_randomized_mapping_access_with_0016_entries(bencher: &mut Bencher) { - let mut prng = new_prng!(); - let (memory_regions, end_address) = generate_memory_regions(16, false, Some(&mut prng)); - let config = Config::default(); - let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); - bencher.iter(|| { - let _ = memory_mapping.map::( - AccessType::Load, - 0x100000000 + (prng.gen::() % end_address), - 1, +macro_rules! 
bench_gapped_randomized_access_with_1024_entries { + (do_bench, $name:ident, $mem:tt) => { + #[bench] + fn $name(bencher: &mut Bencher) { + let frame_size: u64 = 2; + let frame_count: u64 = 1024; + let content = vec![0; (frame_size * frame_count * 2) as usize]; + let memory_regions = vec![MemoryRegion::new_for_testing( + &content[..], + 0x100000000, + frame_size, + false, + )]; + bencher.bench(|bencher| { + let config = Config::default(); + let memory_mapping = + $mem::new::(memory_regions.clone(), &config).unwrap(); + let mut prng = new_prng!(); + bencher.iter(|| { + assert!(memory_mapping + .map::( + AccessType::Load, + 0x100000000 + (prng.gen::() % frame_count * (frame_size * 2)), + 1 + ) + .is_ok()); + }); + }); + } + }; + () => { + bench_gapped_randomized_access_with_1024_entries!( + do_bench, + bench_gapped_randomized_access_with_1024_entries_aligned, + AlignedMemoryMapping ); - }); -} - -#[bench] -fn bench_randomized_mapping_access_with_0064_entries(bencher: &mut Bencher) { - let mut prng = new_prng!(); - let (memory_regions, end_address) = generate_memory_regions(64, false, Some(&mut prng)); - let config = Config::default(); - let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); - bencher.iter(|| { - let _ = memory_mapping.map::( - AccessType::Load, - 0x100000000 + (prng.gen::() % end_address), - 1, + bench_gapped_randomized_access_with_1024_entries!( + do_bench, + bench_gapped_randomized_access_with_1024_entries_unaligned, + UnalignedMemoryMapping ); - }); + }; } +bench_gapped_randomized_access_with_1024_entries!(); -#[bench] -fn bench_randomized_mapping_access_with_0256_entries(bencher: &mut Bencher) { - let mut prng = new_prng!(); - let (memory_regions, end_address) = generate_memory_regions(256, false, Some(&mut prng)); - let config = Config::default(); - let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); - bencher.iter(|| { - let _ = memory_mapping.map::( - AccessType::Load, - 0x100000000 + (prng.gen::() % end_address), - 1, +macro_rules! 
bench_randomized_access_with_0001_entry { + (do_bench, $name:ident, $mem:tt) => { + #[bench] + fn $name(bencher: &mut Bencher) { + let content = vec![0; 1024 * 2]; + let memory_regions = vec![MemoryRegion::new_readonly(&content[..], 0x100000000)]; + let config = Config::default(); + let memory_mapping = $mem::new::(memory_regions, &config).unwrap(); + let mut prng = new_prng!(); + bencher.iter(|| { + let _ = memory_mapping.map::( + AccessType::Load, + 0x100000000 + (prng.gen::() % content.len() as u64), + 1, + ); + }); + } + }; + () => { + bench_randomized_access_with_0001_entry!( + do_bench, + bench_randomized_access_with_0001_entry_aligned, + AlignedMemoryMapping ); - }); -} - -#[bench] -fn bench_randomized_mapping_access_with_1024_entries(bencher: &mut Bencher) { - let mut prng = new_prng!(); - let (memory_regions, end_address) = generate_memory_regions(1024, false, Some(&mut prng)); - let config = Config::default(); - let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); - bencher.iter(|| { - let _ = memory_mapping.map::( - AccessType::Load, - 0x100000000 + (prng.gen::() % end_address), - 1, + bench_randomized_access_with_0001_entry!( + do_bench, + bench_randomized_access_with_0001_entry_unaligned, + UnalignedMemoryMapping ); - }); + }; } +bench_randomized_access_with_0001_entry!(); -#[bench] -fn bench_randomized_access_with_1024_entries(bencher: &mut Bencher) { - let mut prng = new_prng!(); - let (memory_regions, end_address) = generate_memory_regions(1024, false, None); - let config = Config::default(); - let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); - bencher.iter(|| { - let _ = memory_mapping.map::( - AccessType::Load, - 0x100000000 + (prng.gen::() % end_address), - 1, - ); - }); +macro_rules! 
bench_randomized_access_with_n_entries { + (do_bench, $name:ident, $mem:tt, $n:expr) => { + #[bench] + fn $name(bencher: &mut Bencher) { + let mut prng = new_prng!(); + let (memory_regions, end_address) = generate_memory_regions($n, false, Some(&mut prng)); + let config = Config::default(); + let memory_mapping = $mem::new::(memory_regions, &config).unwrap(); + bencher.iter(|| { + let _ = memory_mapping.map::( + AccessType::Load, + 0x100000000 + (prng.gen::() % end_address), + 1, + ); + }); + } + }; + ($n:expr, $aligned:ident, $unaligned:ident) => { + bench_randomized_access_with_n_entries!(do_bench, $aligned, AlignedMemoryMapping, $n); + bench_randomized_access_with_n_entries!(do_bench, $unaligned, UnalignedMemoryMapping, $n); + }; } +bench_randomized_access_with_n_entries!( + 4, + bench_randomized_access_with_0004_entries_aligned, + bench_randomized_access_with_0004_entries_unaligned +); +bench_randomized_access_with_n_entries!( + 16, + bench_randomized_access_with_0016_entries_aligned, + bench_randomized_access_with_0016_entries_unaligned +); +bench_randomized_access_with_n_entries!( + 64, + bench_randomized_access_with_0064_entries_aligned, + bench_randomized_access_with_0064_entries_unaligned +); +bench_randomized_access_with_n_entries!( + 256, + bench_randomized_access_with_0256_entries_aligned, + bench_randomized_access_with_0256_entries_unaligned +); +bench_randomized_access_with_n_entries!( + 1024, + bench_randomized_access_with_1024_entries_aligned, + bench_randomized_access_with_1024_entries_unaligned +); -#[bench] -fn bench_randomized_mapping_with_1024_entries(bencher: &mut Bencher) { - let mut prng = new_prng!(); - let (memory_regions, _end_address) = generate_memory_regions(1024, false, Some(&mut prng)); - let config = Config::default(); - let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); - bencher.iter(|| { - let _ = memory_mapping.map::(AccessType::Load, 0x100000000, 1); - }); +macro_rules! 
bench_randomized_mapping_with_n_entries { + (do_bench, $name:ident, $mem:tt, $n:expr) => { + #[bench] + fn $name(bencher: &mut Bencher) { + let mut prng = new_prng!(); + let (memory_regions, _end_address) = + generate_memory_regions($n, false, Some(&mut prng)); + let config = Config::default(); + let memory_mapping = $mem::new::(memory_regions, &config).unwrap(); + bencher.iter(|| { + let _ = memory_mapping.map::(AccessType::Load, 0x100000000, 1); + }); + } + }; + ($n:expr, $aligned:ident, $unaligned:ident) => { + bench_randomized_mapping_with_n_entries!(do_bench, $aligned, AlignedMemoryMapping, $n); + bench_randomized_mapping_with_n_entries!(do_bench, $unaligned, UnalignedMemoryMapping, $n); + }; } +bench_randomized_mapping_with_n_entries!( + 1, + bench_randomized_mapping_with_001_entries_aligned, + bench_randomized_mapping_with_001_entries_unaligned +); +bench_randomized_mapping_with_n_entries!( + 4, + bench_randomized_mapping_with_004_entries_aligned, + bench_randomized_mapping_with_004_entries_unaligned +); +bench_randomized_mapping_with_n_entries!( + 16, + bench_randomized_mapping_with_0016_entries_aligned, + bench_randomized_mapping_with_0016_entries_unaligned +); +bench_randomized_mapping_with_n_entries!( + 64, + bench_randomized_mapping_with_0064_entries_aligned, + bench_randomized_mapping_with_0064_entries_unaligned +); +bench_randomized_mapping_with_n_entries!( + 256, + bench_randomized_mapping_with_0256_entries_aligned, + bench_randomized_mapping_with_0256_entries_unaligned +); +bench_randomized_mapping_with_n_entries!( + 1024, + bench_randomized_mapping_with_1024_entries_aligned, + bench_randomized_mapping_with_1024_entries_unaligned +); -#[bench] -fn bench_mapping_with_1024_entries(bencher: &mut Bencher) { - let (memory_regions, _end_address) = generate_memory_regions(1024, false, None); - let config = Config::default(); - let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); - bencher.iter(|| { - assert!(memory_mapping - .map::(AccessType::Load, 0x100000000, 1) - .is_ok()); - }); +macro_rules! 
bench_mapping_with_n_entries { + (do_bench, $name:ident, $mem:tt, $n:expr) => { + #[bench] + fn $name(bencher: &mut Bencher) { + let (memory_regions, _end_address) = generate_memory_regions($n, false, None); + let config = Config::default(); + let memory_mapping = $mem::new::(memory_regions, &config).unwrap(); + bencher.iter(|| { + let _ = memory_mapping.map::(AccessType::Load, 0x100000000, 1); + }); + } + }; + ($n:expr, $aligned:ident, $unaligned:ident) => { + bench_mapping_with_n_entries!(do_bench, $aligned, AlignedMemoryMapping, $n); + bench_mapping_with_n_entries!(do_bench, $unaligned, UnalignedMemoryMapping, $n); + }; } +bench_mapping_with_n_entries!( + 1, + bench_mapping_with_001_entries_aligned, + bench_mapping_with_001_entries_unaligned +); +bench_mapping_with_n_entries!( + 4, + bench_mapping_with_004_entries_aligned, + bench_mapping_with_004_entries_unaligned +); +bench_mapping_with_n_entries!( + 16, + bench_mapping_with_0016_entries_aligned, + bench_mapping_with_0016_entries_unaligned +); +bench_mapping_with_n_entries!( + 64, + bench_mapping_with_0064_entries_aligned, + bench_mapping_with_0064_entries_unaligned +); +bench_mapping_with_n_entries!( + 256, + bench_mapping_with_0256_entries_aligned, + bench_mapping_with_0256_entries_unaligned +); +bench_mapping_with_n_entries!( + 1024, + bench_mapping_with_1024_entries_aligned, + bench_mapping_with_1024_entries_unaligned +); diff --git a/src/jit.rs b/src/jit.rs index d2ba8d6f..7ef30ba9 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -17,8 +17,7 @@ extern crate libc; use std::{ - fmt::{Debug, Error as FormatterError, Formatter}, - mem, + fmt::{Debug, Error as FormatterError, Formatter}, mem, ops::{Index, IndexMut}, ptr, }; @@ -29,7 +28,7 @@ use crate::{ vm::{Config, ProgramResult, InstructionMeter, Tracer, ProgramEnvironment}, ebpf::{self, INSN_SIZE, FIRST_SCRATCH_REG, SCRATCH_REGS, FRAME_PTR_REG, MM_STACK_START, STACK_PTR_REG}, error::{UserDefinedError, EbpfError}, - memory_region::{AccessType, MemoryMapping, MemoryRegion}, + memory_region::{AccessType, MemoryMapping}, user_error::UserError, x86::*, }; @@ -847,6 +846,13 @@ fn emit_set_exception_kind(jit: &mut JitCompiler, err: Ebpf emit_ins(jit, X86Instruction::store_immediate(OperandSize::S64, R10, X86IndirectAccess::Offset((std::mem::size_of::() * jit.err_kind_offset) as i32), err_kind as i64)); } +fn emit_result_is_err(jit: &mut JitCompiler, source: u8, destination: u8, indirect: X86IndirectAccess) { + let ok = Result::>::Ok(0); + let err_kind = unsafe { *(&ok as *const _ as *const u64).add(jit.err_kind_offset) }; + emit_ins(jit, X86Instruction::load(OperandSize::S64, source, destination, indirect)); + emit_ins(jit, X86Instruction::cmp_immediate(OperandSize::S64, destination, err_kind as i64, Some(X86IndirectAccess::Offset(0)))); +} + #[derive(Debug)] struct Jump { location: *const u8, @@ -1512,11 +1518,9 @@ impl JitCompiler { ], Some(ARGUMENT_REGISTERS[0])); emit_ins(self, X86Instruction::store(OperandSize::S64, ARGUMENT_REGISTERS[0], RBP, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::PrevInsnMeter)))); } + // Test if result indicates that an error occured - let ok = Result::>::Ok(0); - let err_kind = unsafe { *(&ok as *const _ as *const u64).add(self.err_kind_offset) }; - emit_ins(self, X86Instruction::load(OperandSize::S64, RBP, R11, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr)))); - emit_ins(self, X86Instruction::cmp_immediate(OperandSize::S64, R11, err_kind as i64, 
Some(X86IndirectAccess::Offset(0)))); + emit_result_is_err::(self, RBP, R11, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr))); emit_ins(self, X86Instruction::conditional_jump_immediate(0x85, self.relative_to_anchor(ANCHOR_RUST_EXCEPTION, 6))); // Store Ok value in result register emit_ins(self, X86Instruction::pop(R11)); @@ -1607,6 +1611,12 @@ impl JitCompiler { emit_ins(self, X86Instruction::pop(REGISTER_MAP[0])); // Restore REGISTER_MAP[0] emit_ins(self, X86Instruction::return_near()); + self.set_anchor(ANCHOR_MEMORY_ACCESS_VIOLATION); + emit_ins(self, X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, 8, None)); + emit_ins(self, X86Instruction::pop(R11)); // Put callers PC in R11 + emit_ins(self, X86Instruction::call_immediate(self.relative_to_anchor(ANCHOR_TRANSLATE_PC, 5))); + emit_ins(self, X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_EXCEPTION_AT, 5))); + // Translates a vm memory address to a host memory address for (access_type, len) in &[ (AccessType::Load, 1i32), @@ -1619,73 +1629,25 @@ impl JitCompiler { (AccessType::Store, 8i32), ] { let target_offset = len.trailing_zeros() as usize + 4 * (*access_type as usize); - let stack_offset = if !self.config.dynamic_stack_frames && self.config.enable_stack_frame_gaps { - 24 - } else { - 16 - }; - - self.set_anchor(ANCHOR_MEMORY_ACCESS_VIOLATION + target_offset); - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0x31, R11, R11, 0, None)); // R11 = 0; - emit_ins(self, X86Instruction::load(OperandSize::S64, RSP, R11, X86IndirectAccess::OffsetIndexShift(stack_offset, R11, 0))); - emit_rust_call(self, Value::Constant64(MemoryMapping::generate_access_violation:: as *const u8 as i64, false), &[ + self.set_anchor(ANCHOR_TRANSLATE_MEMORY_ADDRESS + target_offset); + emit_ins(self, X86Instruction::push(R11, None)); + // call MemoryMapping::map() storing the result in EnvironmentStackSlot::OptRetValPtr + emit_rust_call(self, Value::Constant64(MemoryMapping::map:: as *const u8 as i64, false), &[ Argument { index: 3, value: Value::Register(R11) }, // Specify first as the src register could be overwritten by other arguments Argument { index: 4, value: Value::Constant64(*len as i64, false) }, Argument { index: 2, value: Value::Constant64(*access_type as i64, false) }, Argument { index: 1, value: Value::RegisterPlusConstant32(R10, ProgramEnvironment::MEMORY_MAPPING_OFFSET as i32 + self.program_argument_key, false) }, // jit_program_argument.memory_mapping Argument { index: 0, value: Value::RegisterIndirect(RBP, slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr), false) }, // Pointer to optional typed return value ], None); - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, stack_offset as i64 + 8, None)); // Drop R11, RAX, RCX, RDX from stack - emit_ins(self, X86Instruction::pop(R11)); // Put callers PC in R11 - emit_ins(self, X86Instruction::call_immediate(self.relative_to_anchor(ANCHOR_TRANSLATE_PC, 5))); - emit_ins(self, X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_EXCEPTION_AT, 5))); - self.set_anchor(ANCHOR_TRANSLATE_MEMORY_ADDRESS + target_offset); - emit_ins(self, X86Instruction::push(R11, None)); - emit_ins(self, X86Instruction::push(RAX, None)); - emit_ins(self, X86Instruction::push(RCX, None)); - if !self.config.dynamic_stack_frames && self.config.enable_stack_frame_gaps { - emit_ins(self, X86Instruction::push(RDX, None)); - } - emit_ins(self, X86Instruction::mov(OperandSize::S64, R11, RAX)); // RAX = vm_addr; - 
emit_ins(self, X86Instruction::alu(OperandSize::S64, 0xc1, 5, RAX, ebpf::VIRTUAL_ADDRESS_BITS as i64, None)); // RAX >>= ebpf::VIRTUAL_ADDRESS_BITS; - emit_ins(self, X86Instruction::cmp(OperandSize::S64, RAX, R10, Some(X86IndirectAccess::Offset(self.program_argument_key + 8)))); // region_index >= jit_program_argument.memory_mapping.regions.len() - emit_ins(self, X86Instruction::conditional_jump_immediate(0x86, self.relative_to_anchor(ANCHOR_MEMORY_ACCESS_VIOLATION + target_offset, 6))); - debug_assert_eq!(1 << 5, mem::size_of::()); - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0xc1, 4, RAX, 5, None)); // RAX *= mem::size_of::(); - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0x03, RAX, R10, 0, Some(X86IndirectAccess::Offset(self.program_argument_key)))); // region = &jit_program_argument.memory_mapping.regions[region_index]; - if *access_type == AccessType::Store { - emit_ins(self, X86Instruction::cmp_immediate(OperandSize::S8, RAX, 0, Some(X86IndirectAccess::Offset(MemoryRegion::IS_WRITABLE_OFFSET)))); // region.is_writable == 0 - emit_ins(self, X86Instruction::conditional_jump_immediate(0x84, self.relative_to_anchor(ANCHOR_MEMORY_ACCESS_VIOLATION + target_offset, 6))); - } - emit_ins(self, X86Instruction::load(OperandSize::S64, RAX, RCX, X86IndirectAccess::Offset(MemoryRegion::VM_ADDR_OFFSET))); // RCX = region.vm_addr - emit_ins(self, X86Instruction::cmp(OperandSize::S64, RCX, R11, None)); // vm_addr < region.vm_addr - emit_ins(self, X86Instruction::conditional_jump_immediate(0x82, self.relative_to_anchor(ANCHOR_MEMORY_ACCESS_VIOLATION + target_offset, 6))); - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0x29, RCX, R11, 0, None)); // vm_addr -= region.vm_addr - if !self.config.dynamic_stack_frames && self.config.enable_stack_frame_gaps { - emit_ins(self, X86Instruction::load(OperandSize::S8, RAX, RCX, X86IndirectAccess::Offset(MemoryRegion::VM_GAP_SHIFT_OFFSET))); // RCX = region.vm_gap_shift; - emit_ins(self, X86Instruction::mov(OperandSize::S64, R11, RDX)); // RDX = R11; - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0xd3, 5, RDX, 0, None)); // RDX = R11 >> region.vm_gap_shift; - emit_ins(self, X86Instruction::test_immediate(OperandSize::S64, RDX, 1, None)); // (RDX & 1) != 0 - emit_ins(self, X86Instruction::conditional_jump_immediate(0x85, self.relative_to_anchor(ANCHOR_MEMORY_ACCESS_VIOLATION + target_offset, 6))); - emit_ins(self, X86Instruction::load_immediate(OperandSize::S64, RDX, -1)); // RDX = -1; - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0xd3, 4, RDX, 0, None)); // gap_mask = -1 << region.vm_gap_shift; - emit_ins(self, X86Instruction::mov(OperandSize::S64, RDX, RCX)); // RCX = RDX; - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0xf7, 2, RCX, 0, None)); // inverse_gap_mask = !gap_mask; - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0x21, R11, RCX, 0, None)); // below_gap = R11 & inverse_gap_mask; - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0x21, RDX, R11, 0, None)); // above_gap = R11 & gap_mask; - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0xc1, 5, R11, 1, None)); // above_gap >>= 1; - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0x09, RCX, R11, 0, None)); // gapped_offset = above_gap | below_gap; - } - emit_ins(self, X86Instruction::lea(OperandSize::S64, R11, RCX, Some(X86IndirectAccess::Offset(*len)))); // RCX = R11 + len; - emit_ins(self, X86Instruction::cmp(OperandSize::S64, RCX, RAX, Some(X86IndirectAccess::Offset(MemoryRegion::LEN_OFFSET)))); // region.len < R11 + len - 
emit_ins(self, X86Instruction::conditional_jump_immediate(0x82, self.relative_to_anchor(ANCHOR_MEMORY_ACCESS_VIOLATION + target_offset, 6))); - emit_ins(self, X86Instruction::alu(OperandSize::S64, 0x03, R11, RAX, 0, Some(X86IndirectAccess::Offset(MemoryRegion::HOST_ADDR_OFFSET)))); // R11 += region.host_addr; - if !self.config.dynamic_stack_frames && self.config.enable_stack_frame_gaps { - emit_ins(self, X86Instruction::pop(RDX)); - } - emit_ins(self, X86Instruction::pop(RCX)); - emit_ins(self, X86Instruction::pop(RAX)); + // Throw error if the result indicates one + emit_result_is_err::(self, RBP, R11, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr))); + emit_ins(self, X86Instruction::conditional_jump_immediate(0x85, self.relative_to_anchor(ANCHOR_MEMORY_ACCESS_VIOLATION, 6))); + + // unwrap() the host addr into R11 + emit_ins(self, X86Instruction::load(OperandSize::S64, RBP, R11, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr)))); + emit_ins(self, X86Instruction::load(OperandSize::S64, R11, R11, X86IndirectAccess::Offset(8))); + emit_ins(self, X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, 8, None)); emit_ins(self, X86Instruction::return_near()); } @@ -1779,7 +1741,7 @@ mod tests { ) .unwrap() } - + #[test] fn test_code_length_estimate() { const INSTRUCTION_COUNT: usize = 256; diff --git a/src/memory_region.rs b/src/memory_region.rs index c7b32e6e..36559327 100644 --- a/src/memory_region.rs +++ b/src/memory_region.rs @@ -5,7 +5,7 @@ use crate::{ error::{EbpfError, UserDefinedError}, vm::Config, }; -use std::fmt; +use std::{array, cell::UnsafeCell, fmt, ops::Range}; /* Explaination of the Gapped Memory @@ -39,17 +39,8 @@ pub struct MemoryRegion { /// Is also writable (otherwise it is readonly) pub is_writable: bool, } -impl MemoryRegion { - pub(crate) const HOST_ADDR_OFFSET: i32 = 0; - pub(crate) const VM_ADDR_OFFSET: i32 = - MemoryRegion::HOST_ADDR_OFFSET + std::mem::size_of::() as i32; - pub(crate) const LEN_OFFSET: i32 = - MemoryRegion::VM_ADDR_OFFSET + std::mem::size_of::() as i32; - pub(crate) const VM_GAP_SHIFT_OFFSET: i32 = - MemoryRegion::LEN_OFFSET + std::mem::size_of::() as i32; - pub(crate) const IS_WRITABLE_OFFSET: i32 = - MemoryRegion::VM_GAP_SHIFT_OFFSET + std::mem::size_of::() as i32; +impl MemoryRegion { fn new(slice: &[u8], vm_addr: u64, vm_gap_size: u64, is_writable: bool) -> Self { let mut vm_gap_shift = (std::mem::size_of::() as u8) .saturating_mul(8) @@ -122,6 +113,7 @@ impl MemoryRegion { Err(EbpfError::InvalidVirtualAddress(vm_addr)) } } + impl fmt::Debug for MemoryRegion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( @@ -155,20 +147,156 @@ pub enum AccessType { Store, } -/// Indirection to use instead of a slice to make handling easier +/// Memory mapping based on eytzinger search. 
 #[derive(Debug)]
-pub struct MemoryMapping<'a> {
+pub struct UnalignedMemoryMapping<'a> {
     /// Mapped memory regions
     regions: Box<[MemoryRegion]>,
+    /// Copy of the regions vm_addr fields to improve cache density
+    region_addresses: Box<[u64]>,
+    /// Cache of the last `MappingCache::SIZE` vm_addr => region_index lookups
+    cache: UnsafeCell<MappingCache>,
     /// VM configuration
     config: &'a Config,
 }
-impl<'a> MemoryMapping<'a> {
+
+impl<'a> UnalignedMemoryMapping<'a> {
+    fn construct_eytzinger_order(
+        &mut self,
+        ascending_regions: &[MemoryRegion],
+        mut in_index: usize,
+        out_index: usize,
+    ) -> usize {
+        if out_index >= self.regions.len() {
+            return in_index;
+        }
+        in_index = self.construct_eytzinger_order(
+            ascending_regions,
+            in_index,
+            out_index.saturating_mul(2).saturating_add(1),
+        );
+        self.regions[out_index] = ascending_regions[in_index].clone();
+        self.region_addresses[out_index] = ascending_regions[in_index].vm_addr;
+        self.construct_eytzinger_order(
+            ascending_regions,
+            in_index.saturating_add(1),
+            out_index.saturating_mul(2).saturating_add(2),
+        )
+    }
+
+    /// Creates a new MemoryMapping structure from the given regions
+    pub fn new<E: UserDefinedError>(
+        mut regions: Vec<MemoryRegion>,
+        config: &'a Config,
+    ) -> Result<Self, EbpfError<E>> {
+        regions.sort();
+        for index in 1..regions.len() {
+            let first = &regions[index.saturating_sub(1)];
+            let second = &regions[index];
+            if first.vm_addr.saturating_add(first.len) > second.vm_addr {
+                return Err(EbpfError::InvalidMemoryRegion(index));
+            }
+        }
+
+        let mut result = Self {
+            regions: vec![MemoryRegion::default(); regions.len()].into_boxed_slice(),
+            region_addresses: vec![0; regions.len()].into_boxed_slice(),
+            cache: UnsafeCell::new(MappingCache::new()),
+            config,
+        };
+        result.construct_eytzinger_order(&regions, 0, 0);
+        Ok(result)
+    }
+
+    /// Given a list of regions translate from virtual machine to host address
+    #[allow(clippy::integer_arithmetic)]
+    pub fn map<E: UserDefinedError>(
+        &self,
+        access_type: AccessType,
+        vm_addr: u64,
+        len: u64,
+    ) -> Result<u64, EbpfError<E>> {
+        // Safety:
+        // &mut references to the mapping cache are only created internally here
+        // and in replace_region(). The methods never invoke each other and
+        // UnalignedMemoryMapping is !Sync, so the cache reference below is
+        // guaranteed to be unique.
+ let cache = unsafe { &mut *self.cache.get() }; + let (cache_miss, index) = if let Some(region) = cache.find(vm_addr) { + (false, region) + } else { + let mut index = 1; + while index <= self.region_addresses.len() { + // Safety: + // we start the search at index=1 and in the loop condition check + // for index <= len, so bound checks can be avoided + index = (index << 1) + + unsafe { *self.region_addresses.get_unchecked(index - 1) <= vm_addr } + as usize; + } + index >>= index.trailing_zeros() + 1; + if index == 0 { + return generate_access_violation(self.config, access_type, vm_addr, len); + } + (true, index) + }; + + // Safety: + // we check for index==0 above, and by construction if we get here index + // must be contained in region + let region = unsafe { self.regions.get_unchecked(index - 1) }; + if access_type == AccessType::Load || region.is_writable { + if let Ok(host_addr) = region.vm_to_host::(vm_addr, len as u64) { + if cache_miss { + cache.insert( + region.vm_addr..region.vm_addr.saturating_add(region.len), + index, + ); + } + return Ok(host_addr); + } + } + + generate_access_violation(self.config, access_type, vm_addr, len) + } + + /// Returns the `MemoryRegion`s in this mapping + pub fn get_regions(&self) -> &[MemoryRegion] { + &self.regions + } + + /// Replaces the `MemoryRegion` at the given index + pub fn replace_region( + &mut self, + index: usize, + region: MemoryRegion, + ) -> Result<(), EbpfError> { + if index >= self.regions.len() || self.regions[index].vm_addr != region.vm_addr { + return Err(EbpfError::InvalidMemoryRegion(index)); + } + self.regions[index] = region; + self.cache.get_mut().flush(); + Ok(()) + } +} + +/// Memory mapping that uses the upper half of an address to identify the +/// underlying memory region. +#[derive(Debug)] +pub struct AlignedMemoryMapping<'a> { + /// Mapped memory regions + regions: Box<[MemoryRegion]>, + /// VM configuration + config: &'a Config, +} + +impl<'a> AlignedMemoryMapping<'a> { /// Creates a new MemoryMapping structure from the given regions pub fn new( mut regions: Vec, config: &'a Config, ) -> Result> { + regions.insert(0, MemoryRegion::new_readonly(&[], 0)); regions.sort(); for (index, region) in regions.iter().enumerate() { if region @@ -204,46 +332,7 @@ impl<'a> MemoryMapping<'a> { } } } - self.generate_access_violation(access_type, vm_addr, len) - } - - /// Helper for map to generate errors - pub fn generate_access_violation( - &self, - access_type: AccessType, - vm_addr: u64, - len: u64, - ) -> Result> { - let stack_frame = (vm_addr as i64) - .saturating_sub(ebpf::MM_STACK_START as i64) - .checked_div(self.config.stack_frame_size as i64) - .unwrap_or(0); - if !self.config.dynamic_stack_frames - && (-1..(self.config.max_call_depth as i64).saturating_add(1)).contains(&stack_frame) - { - Err(EbpfError::StackAccessViolation( - 0, // Filled out later - access_type, - vm_addr, - len, - stack_frame, - )) - } else { - let region_name = match vm_addr & (!ebpf::MM_PROGRAM_START.saturating_sub(1)) { - ebpf::MM_PROGRAM_START => "program", - ebpf::MM_STACK_START => "stack", - ebpf::MM_HEAP_START => "heap", - ebpf::MM_INPUT_START => "input", - _ => "unknown", - }; - Err(EbpfError::AccessViolation( - 0, // Filled out later - access_type, - vm_addr, - len, - region_name, - )) - } + generate_access_violation(self.config, access_type, vm_addr, len) } /// Returns the `MemoryRegion`s in this mapping @@ -276,3 +365,456 @@ impl<'a> MemoryMapping<'a> { Ok(()) } } + +/// Maps virtual memory to host memory. 
+#[derive(Debug)]
+pub enum MemoryMapping<'a> {
+    /// Aligned memory mapping which uses the upper half of an address to
+    /// identify the underlying memory region.
+    Aligned(AlignedMemoryMapping<'a>),
+    /// Memory mapping that allows mapping unaligned memory regions.
+    Unaligned(UnalignedMemoryMapping<'a>),
+}
+
+impl<'a> MemoryMapping<'a> {
+    /// Creates a new memory mapping.
+    ///
+    /// Uses aligned or unaligned memory mapping depending on the value of
+    /// `config.aligned_memory_mapping=true`.
+    pub fn new<E: UserDefinedError>(
+        regions: Vec<MemoryRegion>,
+        config: &'a Config,
+    ) -> Result<Self, EbpfError<E>> {
+        if config.aligned_memory_mapping {
+            AlignedMemoryMapping::new(regions, config).map(MemoryMapping::Aligned)
+        } else {
+            UnalignedMemoryMapping::new(regions, config).map(MemoryMapping::Unaligned)
+        }
+    }
+
+    /// Map virtual memory to host memory.
+    pub fn map<E: UserDefinedError>(
+        &self,
+        access_type: AccessType,
+        vm_addr: u64,
+        len: u64,
+    ) -> Result<u64, EbpfError<E>> {
+        match self {
+            MemoryMapping::Aligned(m) => m.map(access_type, vm_addr, len),
+            MemoryMapping::Unaligned(m) => m.map(access_type, vm_addr, len),
+        }
+    }
+
+    /// Returns the `MemoryRegion`s in this mapping.
+    pub fn get_regions(&self) -> &[MemoryRegion] {
+        match self {
+            MemoryMapping::Aligned(m) => m.get_regions(),
+            MemoryMapping::Unaligned(m) => m.get_regions(),
+        }
+    }
+
+    /// Replaces the `MemoryRegion` at the given index
+    pub fn replace_region<E: UserDefinedError>(
+        &mut self,
+        index: usize,
+        region: MemoryRegion,
+    ) -> Result<(), EbpfError<E>> {
+        match self {
+            MemoryMapping::Aligned(m) => m.replace_region(index, region),
+            MemoryMapping::Unaligned(m) => m.replace_region(index, region),
+        }
+    }
+}
+
+/// Helper for map to generate errors
+fn generate_access_violation<E: UserDefinedError>(
+    config: &Config,
+    access_type: AccessType,
+    vm_addr: u64,
+    len: u64,
+) -> Result<u64, EbpfError<E>> {
+    let stack_frame = (vm_addr as i64)
+        .saturating_sub(ebpf::MM_STACK_START as i64)
+        .checked_div(config.stack_frame_size as i64)
+        .unwrap_or(0);
+    if !config.dynamic_stack_frames
+        && (-1..(config.max_call_depth as i64).saturating_add(1)).contains(&stack_frame)
+    {
+        Err(EbpfError::StackAccessViolation(
+            0, // Filled out later
+            access_type,
+            vm_addr,
+            len,
+            stack_frame,
+        ))
+    } else {
+        let region_name = match vm_addr & (!ebpf::MM_PROGRAM_START.saturating_sub(1)) {
+            ebpf::MM_PROGRAM_START => "program",
+            ebpf::MM_STACK_START => "stack",
+            ebpf::MM_HEAP_START => "heap",
+            ebpf::MM_INPUT_START => "input",
+            _ => "unknown",
+        };
+        Err(EbpfError::AccessViolation(
+            0, // Filled out later
+            access_type,
+            vm_addr,
+            len,
+            region_name,
+        ))
+    }
+}
+
+/// Fast, small linear cache used to speed up unaligned memory mapping.
+#[derive(Debug)]
+struct MappingCache {
+    // The cached entries.
+    entries: [(Range<u64>, usize); MappingCache::SIZE as usize],
+    // Index of the last accessed memory region.
+    //
+    // New entries are written backwards, so that find() can always scan
+    // forward which is faster.
+ head: isize, +} + +impl MappingCache { + const SIZE: isize = 4; + + fn new() -> MappingCache { + MappingCache { + entries: array::from_fn(|_| (0..0, 0)), + head: 0, + } + } + + #[allow(clippy::integer_arithmetic)] + #[inline] + fn find(&self, vm_addr: u64) -> Option { + for i in 0..Self::SIZE { + let index = (self.head + i) % Self::SIZE; + // Safety: + // index is guaranteed to be between 0..Self::SIZE + let (vm_range, region_index) = unsafe { self.entries.get_unchecked(index as usize) }; + if vm_range.contains(&vm_addr) { + return Some(*region_index); + } + } + + None + } + + #[allow(clippy::integer_arithmetic)] + #[inline] + fn insert(&mut self, vm_range: Range, region_index: usize) { + self.head = (self.head - 1).rem_euclid(Self::SIZE); + // Safety: + // self.head is guaranteed to be between 0..Self::SIZE + unsafe { *self.entries.get_unchecked_mut(self.head as usize) = (vm_range, region_index) }; + } + + #[inline] + fn flush(&mut self) { + self.entries = array::from_fn(|_| (0..0, 0)); + self.head = 0; + } +} + +#[cfg(test)] +mod test { + use crate::user_error::UserError; + + use super::*; + + #[test] + fn test_mapping_cache() { + let mut cache = MappingCache::new(); + assert_eq!(cache.find(0), None); + + let mut ranges = vec![10u64..20, 20..30, 30..40, 40..50]; + for (region, range) in ranges.iter().cloned().enumerate() { + cache.insert(range, region); + } + for (region, range) in ranges.iter().enumerate() { + if region > 0 { + assert_eq!(cache.find(range.start - 1), Some(region - 1)); + } else { + assert_eq!(cache.find(range.start - 1), None); + } + assert_eq!(cache.find(range.start), Some(region)); + assert_eq!(cache.find(range.start + 1), Some(region)); + assert_eq!(cache.find(range.end - 1), Some(region)); + if region < 3 { + assert_eq!(cache.find(range.end), Some(region + 1)); + } else { + assert_eq!(cache.find(range.end), None); + } + } + + cache.insert(50..60, 4); + ranges.push(50..60); + for (region, range) in ranges.iter().enumerate() { + if region == 0 { + assert_eq!(cache.find(range.start), None); + continue; + } + if region > 1 { + assert_eq!(cache.find(range.start - 1), Some(region - 1)); + } else { + assert_eq!(cache.find(range.start - 1), None); + } + assert_eq!(cache.find(range.start), Some(region)); + assert_eq!(cache.find(range.start + 1), Some(region)); + assert_eq!(cache.find(range.end - 1), Some(region)); + if region < 4 { + assert_eq!(cache.find(range.end), Some(region + 1)); + } else { + assert_eq!(cache.find(range.end), None); + } + } + } + + #[test] + fn test_mapping_cache_flush() { + let mut cache = MappingCache::new(); + assert_eq!(cache.find(0), None); + cache.insert(0..10, 0); + assert_eq!(cache.find(0), Some(0)); + cache.flush(); + assert_eq!(cache.find(0), None); + } + + #[test] + fn test_map_empty() { + let config = Config::default(); + let m = UnalignedMemoryMapping::new::(vec![], &config).unwrap(); + assert!(matches!( + m.map::(AccessType::Load, ebpf::MM_INPUT_START, 8), + Err(EbpfError::AccessViolation(..)) + )); + + let m = AlignedMemoryMapping::new::(vec![], &config).unwrap(); + assert!(matches!( + m.map::(AccessType::Load, ebpf::MM_INPUT_START, 8), + Err(EbpfError::AccessViolation(..)) + )); + } + + #[test] + fn test_unaligned_map_overlap() { + let config = Config::default(); + let mem1 = [1, 2, 3, 4]; + let mem2 = [5, 6]; + assert_eq!( + UnalignedMemoryMapping::new::( + vec![ + MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START), + MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64 - 1), + ], + &config, + ) + 
.unwrap_err(), + EbpfError::InvalidMemoryRegion(1) + ); + assert!(UnalignedMemoryMapping::new::( + vec![ + MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START), + MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64), + ], + &config, + ) + .is_ok()); + } + + #[test] + fn test_unaligned_map() { + let config = Config::default(); + let mem1 = [11]; + let mem2 = [22, 22]; + let mem3 = [33]; + let mem4 = [44, 44]; + let m = UnalignedMemoryMapping::new::( + vec![ + MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START), + MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64), + MemoryRegion::new_readonly( + &mem3, + ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64, + ), + MemoryRegion::new_readonly( + &mem4, + ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64, + ), + ], + &config, + ) + .unwrap(); + + assert_eq!( + m.map::(AccessType::Load, ebpf::MM_INPUT_START, 1) + .unwrap(), + mem1.as_ptr() as u64 + ); + + assert_eq!( + m.map::( + AccessType::Load, + ebpf::MM_INPUT_START + mem1.len() as u64, + 1 + ) + .unwrap(), + mem2.as_ptr() as u64 + ); + + assert_eq!( + m.map::( + AccessType::Load, + ebpf::MM_INPUT_START + (mem1.len() + mem2.len()) as u64, + 1 + ) + .unwrap(), + mem3.as_ptr() as u64 + ); + + assert_eq!( + m.map::( + AccessType::Load, + ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len()) as u64, + 1 + ) + .unwrap(), + mem4.as_ptr() as u64 + ); + + assert!(matches!( + m.map::( + AccessType::Load, + ebpf::MM_INPUT_START + (mem1.len() + mem2.len() + mem3.len() + mem4.len()) as u64, + 1 + ), + Err(EbpfError::AccessViolation(..)) + )); + } + + #[test] + fn test_unaligned_map_replace_region() { + let config = Config::default(); + let mem1 = [11]; + let mem2 = [22, 22]; + let mem3 = [33]; + let mut m = UnalignedMemoryMapping::new::( + vec![ + MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START), + MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64), + ], + &config, + ) + .unwrap(); + + assert_eq!( + m.map::(AccessType::Load, ebpf::MM_INPUT_START, 1) + .unwrap(), + mem1.as_ptr() as u64 + ); + + assert_eq!( + m.map::( + AccessType::Load, + ebpf::MM_INPUT_START + mem1.len() as u64, + 1 + ) + .unwrap(), + mem2.as_ptr() as u64 + ); + + assert!(matches!( + m.replace_region( + 2, + MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64) + ), + Err(EbpfError::::InvalidMemoryRegion(2)) + )); + + let region_index = m + .get_regions() + .iter() + .position(|mem| mem.vm_addr == ebpf::MM_INPUT_START + mem1.len() as u64) + .unwrap(); + + // old.vm_addr != new.vm_addr + assert!(matches!( + m.replace_region( + region_index, + MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64 + 1) + ), + Err(EbpfError::::InvalidMemoryRegion(i)) if i == region_index + )); + + m.replace_region::( + region_index, + MemoryRegion::new_readonly(&mem3, ebpf::MM_INPUT_START + mem1.len() as u64), + ) + .unwrap(); + + assert_eq!( + m.map::( + AccessType::Load, + ebpf::MM_INPUT_START + mem1.len() as u64, + 1 + ) + .unwrap(), + mem3.as_ptr() as u64 + ); + } + + #[test] + fn test_aligned_map_replace_region() { + let config = Config::default(); + let mem1 = [11]; + let mem2 = [22, 22]; + let mem3 = [33, 33]; + let mut m = AlignedMemoryMapping::new::( + vec![ + MemoryRegion::new_readonly(&mem1, ebpf::MM_PROGRAM_START), + MemoryRegion::new_readonly(&mem2, ebpf::MM_STACK_START), + ], + &config, + ) + .unwrap(); + + assert_eq!( + m.map::(AccessType::Load, ebpf::MM_STACK_START, 1) + .unwrap(), + 
mem2.as_ptr() as u64 + ); + + // index > regions.len() + assert!(matches!( + m.replace_region(3, MemoryRegion::new_readonly(&mem3, ebpf::MM_STACK_START)), + Err(EbpfError::::InvalidMemoryRegion(3)) + )); + + // index != addr >> VIRTUAL_ADDRESS_BITS + assert!(matches!( + m.replace_region(2, MemoryRegion::new_readonly(&mem3, ebpf::MM_HEAP_START)), + Err(EbpfError::::InvalidMemoryRegion(2)) + )); + + // index + len != addr >> VIRTUAL_ADDRESS_BITS + assert!(matches!( + m.replace_region( + 2, + MemoryRegion::new_readonly(&mem3, ebpf::MM_HEAP_START - 1) + ), + Err(EbpfError::::InvalidMemoryRegion(2)) + )); + + m.replace_region::(2, MemoryRegion::new_readonly(&mem3, ebpf::MM_STACK_START)) + .unwrap(); + + assert_eq!( + m.map::(AccessType::Load, ebpf::MM_STACK_START, 1) + .unwrap(), + mem3.as_ptr() as u64 + ); + } +} diff --git a/src/syscalls.rs b/src/syscalls.rs index b0eb9dc7..73a165bb 100644 --- a/src/syscalls.rs +++ b/src/syscalls.rs @@ -181,7 +181,7 @@ impl SyscallObject for BpfGatherBytes { /// /// let mut result: Result = Ok(0); /// let config = Config::default(); -/// let mut memory_mapping = MemoryMapping::new::(vec![MemoryRegion::default(), MemoryRegion::new_writable(val, val_va)], &config).unwrap(); +/// let mut memory_mapping = MemoryMapping::new::(vec![MemoryRegion::new_writable(val, val_va)], &config).unwrap(); /// BpfMemFrob::call(&mut BpfMemFrob {}, val_va, 8, 0, 0, 0, &mut memory_mapping, &mut result); /// assert_eq!(val, &[0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x3b, 0x08, 0x19]); /// BpfMemFrob::call(&mut BpfMemFrob {}, val_va, 8, 0, 0, 0, &mut memory_mapping, &mut result); @@ -233,11 +233,11 @@ impl SyscallObject for BpfMemFrob { /// /// let mut result: Result = Ok(0); /// let config = Config::default(); -/// let mut memory_mapping = MemoryMapping::new::(vec![MemoryRegion::default(), MemoryRegion::new_readonly(foo.as_bytes(), va_foo)], &config).unwrap(); +/// let mut memory_mapping = MemoryMapping::new::(vec![MemoryRegion::new_readonly(foo.as_bytes(), va_foo)], &config).unwrap(); /// BpfStrCmp::call(&mut BpfStrCmp {}, va_foo, va_foo, 0, 0, 0, &mut memory_mapping, &mut result); /// assert!(result.unwrap() == 0); /// let mut result: Result = Ok(0); -/// let mut memory_mapping = MemoryMapping::new::(vec![MemoryRegion::default(), MemoryRegion::new_readonly(foo.as_bytes(), va_foo), MemoryRegion::new_readonly(bar.as_bytes(), va_bar)], &config).unwrap(); +/// let mut memory_mapping = MemoryMapping::new::(vec![MemoryRegion::new_readonly(foo.as_bytes(), va_foo), MemoryRegion::new_readonly(bar.as_bytes(), va_bar)], &config).unwrap(); /// BpfStrCmp::call(&mut BpfStrCmp {}, va_foo, va_bar, 0, 0, 0, &mut memory_mapping, &mut result); /// assert!(result.unwrap() != 0); /// ``` diff --git a/src/vm.rs b/src/vm.rs index 94ca6be6..27b239d1 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -229,6 +229,8 @@ pub struct Config { /// Ensure that rodata sections don't exceed their maximum allowed size and /// overlap with the stack pub reject_rodata_stack_overlap: bool, + /// Use aligned memory mapping + pub aligned_memory_mapping: bool, } impl Config { @@ -261,6 +263,7 @@ impl Default for Config { enable_elf_vaddr: true, new_elf_parser: true, reject_rodata_stack_overlap: true, + aligned_memory_mapping: true, } } } @@ -514,7 +517,6 @@ impl<'a, V: Verifier, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, V, E, let config = executable.get_config(); let mut stack = CallFrames::new(config); let regions: Vec = vec![ - MemoryRegion::new_readonly(&[], 0), verified_executable.get_executable().get_ro_region(), 
             stack.get_memory_region(),
             MemoryRegion::new_writable(heap_region, ebpf::MM_HEAP_START),
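
Note on the lookup strategy introduced by this patch: `UnalignedMemoryMapping` stores the region start addresses in Eytzinger (BFS) order and locates the candidate region with a branchless predecessor search, falling back to `generate_access_violation` when the address precedes every region. The following standalone sketch illustrates that layout and search over plain `u64` keys; it is not part of the patch, and the names `eytzinger`, `predecessor`, and `starts` are illustrative only.

// Sketch only: mirrors construct_eytzinger_order() and the search loop in
// UnalignedMemoryMapping::map(), using u64 keys instead of MemoryRegion.

/// Lays out `sorted` in Eytzinger (BFS) order: the node stored at `out_index`
/// has its children at `2 * out_index + 1` and `2 * out_index + 2`.
fn eytzinger(sorted: &[u64], out: &mut [u64], mut in_index: usize, out_index: usize) -> usize {
    if out_index >= out.len() {
        return in_index;
    }
    // Fill the left subtree first so that an in-order walk reproduces `sorted`.
    in_index = eytzinger(sorted, out, in_index, 2 * out_index + 1);
    out[out_index] = sorted[in_index];
    eytzinger(sorted, out, in_index + 1, 2 * out_index + 2)
}

/// Returns the greatest key <= `needle`, i.e. the start address of the only
/// region that could contain `needle`; `None` means "before every region".
fn predecessor(eytz: &[u64], needle: u64) -> Option<u64> {
    let mut index = 1;
    while index <= eytz.len() {
        // Branchless descent: append a 1 bit whenever we move right.
        index = (index << 1) + (eytz[index - 1] <= needle) as usize;
    }
    // Drop the trailing "left" moves; what remains is the 1-based position of
    // the last node where we moved right, i.e. the predecessor.
    index >>= index.trailing_zeros() + 1;
    if index == 0 {
        None
    } else {
        Some(eytz[index - 1])
    }
}

fn main() {
    let starts = [0x10u64, 0x20, 0x30, 0x40, 0x50];
    let mut eytz = vec![0u64; starts.len()];
    eytzinger(&starts, &mut eytz, 0, 0);
    assert_eq!(eytz, [0x40, 0x20, 0x50, 0x10, 0x30]);
    assert_eq!(predecessor(&eytz, 0x25), Some(0x20)); // falls in the second region
    assert_eq!(predecessor(&eytz, 0x0f), None);       // below every region
    assert_eq!(predecessor(&eytz, 0x55), Some(0x50)); // at or past the last start
}

In `map()` the recovered index picks `regions[index - 1]` (both arrays share the Eytzinger order), and a hit is remembered in the four-entry `MappingCache` so that repeated accesses to the same region skip the search entirely.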