From 2643a04446b86e6bb6cc6c053c76effddb55bf08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 14 Apr 2023 08:45:08 +0200 Subject: [PATCH] Adds Config::enable_address_translation and MemoryMapping::Identity. (#460) --- src/elf.rs | 2 +- src/jit.rs | 28 +++++++++++++++++++++++----- src/memory_region.rs | 17 +++++++++++++++++ src/vm.rs | 8 +++++++- 4 files changed, 48 insertions(+), 7 deletions(-) diff --git a/src/elf.rs b/src/elf.rs index a2ce330f..998028d9 100644 --- a/src/elf.rs +++ b/src/elf.rs @@ -2146,6 +2146,6 @@ mod test { Executable::jit_compile(&mut executable).unwrap(); } - assert_eq!(10538, executable.mem_size()); + assert_eq!(10546, executable.mem_size()); } } diff --git a/src/jit.rs b/src/jit.rs index 22983b83..4176f668 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -1014,10 +1014,29 @@ impl<'a, V: Verifier, C: ContextObject> JitCompiler<'a, V, C> { _ => {} } - let access_type = if value.is_none() { AccessType::Load } else { AccessType::Store }; - let anchor = ANCHOR_TRANSLATE_MEMORY_ADDRESS + len.trailing_zeros() as usize + 4 * (access_type as usize); - self.emit_ins(X86Instruction::push_immediate(OperandSize::S64, self.pc as i32)); - self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(anchor, 5))); + if self.config.enable_address_translation { + let access_type = if value.is_none() { AccessType::Load } else { AccessType::Store }; + let anchor = ANCHOR_TRANSLATE_MEMORY_ADDRESS + len.trailing_zeros() as usize + 4 * (access_type as usize); + self.emit_ins(X86Instruction::push_immediate(OperandSize::S64, self.pc as i32)); + self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(anchor, 5))); + } else if value.is_some() { + match len { + 1 => self.emit_ins(X86Instruction::store(OperandSize::S8, R10, R11, X86IndirectAccess::Offset(0))), + 2 => self.emit_ins(X86Instruction::store(OperandSize::S16, R10, R11, X86IndirectAccess::Offset(0))), + 4 => self.emit_ins(X86Instruction::store(OperandSize::S32, 
R10, R11, X86IndirectAccess::Offset(0))), + 8 => self.emit_ins(X86Instruction::store(OperandSize::S64, R10, R11, X86IndirectAccess::Offset(0))), + _ => unreachable!(), + } + } else { + match len { + 1 => self.emit_ins(X86Instruction::load(OperandSize::S8, R11, R10, X86IndirectAccess::Offset(0))), + 2 => self.emit_ins(X86Instruction::load(OperandSize::S16, R11, R10, X86IndirectAccess::Offset(0))), + 4 => self.emit_ins(X86Instruction::load(OperandSize::S32, R11, R10, X86IndirectAccess::Offset(0))), + 8 => self.emit_ins(X86Instruction::load(OperandSize::S64, R11, R10, X86IndirectAccess::Offset(0))), + _ => unreachable!(), + } + } + if let Some(dst) = dst { self.emit_ins(X86Instruction::mov(OperandSize::S64, R11, dst)); } @@ -1432,7 +1451,6 @@ impl<'a, V: Verifier, C: ContextObject> JitCompiler<'a, V, C> { 4 => MemoryMapping::load::<u32> as *const u8 as i64, 8 => MemoryMapping::load::<u64> as *const u8 as i64, _ => unreachable!() - }; self.emit_rust_call(Value::Constant64(load, false), &[ Argument { index: 2, value: Value::Register(R11) }, // Specify first as the src register could be overwritten by other arguments diff --git a/src/memory_region.rs b/src/memory_region.rs index 841e69f0..8d60bc9c 100644 --- a/src/memory_region.rs +++ b/src/memory_region.rs @@ -681,6 +681,8 @@ impl<'a> AlignedMemoryMapping<'a> { /// Maps virtual memory to host memory. #[derive(Debug)] pub enum MemoryMapping<'a> { + /// Used when address translation is disabled + Identity, /// Aligned memory mapping which uses the upper half of an address to /// identify the underlying memory region. Aligned(AlignedMemoryMapping<'a>), @@ -689,6 +691,10 @@ pub enum MemoryMapping<'a> { } impl<'a> MemoryMapping<'a> { + pub(crate) fn new_identity() -> Self { + MemoryMapping::Identity + } + /// Creates a new memory mapping. /// /// Uses aligned or unaligned memory mapping depending on the value of @@ -721,6 +727,7 @@ impl<'a> MemoryMapping<'a> { /// Map virtual memory to host memory. 
pub fn map(&self, access_type: AccessType, vm_addr: u64, len: u64, pc: usize) -> ProgramResult { match self { + MemoryMapping::Identity => ProgramResult::Ok(vm_addr), MemoryMapping::Aligned(m) => m.map(access_type, vm_addr, len, pc), MemoryMapping::Unaligned(m) => m.map(access_type, vm_addr, len, pc), } @@ -732,6 +739,9 @@ impl<'a> MemoryMapping<'a> { #[inline] pub fn load<T: Pod + Into<u64>>(&self, vm_addr: u64, pc: usize) -> ProgramResult { match self { + MemoryMapping::Identity => unsafe { + ProgramResult::Ok(ptr::read_unaligned(vm_addr as *const T).into()) + }, MemoryMapping::Aligned(m) => m.load::<T>(vm_addr, pc), MemoryMapping::Unaligned(m) => m.load::<T>(vm_addr, pc), } @@ -743,6 +753,10 @@ impl<'a> MemoryMapping<'a> { #[inline] pub fn store<T: Pod>(&self, value: T, vm_addr: u64, pc: usize) -> ProgramResult { match self { + MemoryMapping::Identity => unsafe { + ptr::write_unaligned(vm_addr as *mut T, value); + ProgramResult::Ok(0) + }, MemoryMapping::Aligned(m) => m.store(value, vm_addr, pc), MemoryMapping::Unaligned(m) => m.store(value, vm_addr, pc), } @@ -755,6 +769,7 @@ impl<'a> MemoryMapping<'a> { vm_addr: u64, ) -> Result<&MemoryRegion, Box<EbpfError>> { match self { + MemoryMapping::Identity => Err(Box::new(EbpfError::InvalidMemoryRegion(0))), MemoryMapping::Aligned(m) => m.region(access_type, vm_addr), MemoryMapping::Unaligned(m) => m.region(access_type, vm_addr), } @@ -763,6 +778,7 @@ impl<'a> MemoryMapping<'a> { /// Returns the `MemoryRegion`s in this mapping. 
pub fn get_regions(&self) -> &[MemoryRegion] { match self { + MemoryMapping::Identity => &[], MemoryMapping::Aligned(m) => m.get_regions(), MemoryMapping::Unaligned(m) => m.get_regions(), } @@ -771,6 +787,7 @@ impl<'a> MemoryMapping<'a> { /// Replaces the `MemoryRegion` at the given index pub fn replace_region(&mut self, index: usize, region: MemoryRegion) -> Result<(), EbpfError> { match self { + MemoryMapping::Identity => Err(EbpfError::InvalidMemoryRegion(index)), MemoryMapping::Aligned(m) => m.replace_region(index, region), MemoryMapping::Unaligned(m) => m.replace_region(index, region), } diff --git a/src/vm.rs b/src/vm.rs index fe58e0fa..33dd1d83 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -199,6 +199,8 @@ pub struct Config { pub max_call_depth: usize, /// Size of a stack frame in bytes, must match the size specified in the LLVM BPF backend pub stack_frame_size: usize, + /// Enables the use of MemoryMapping and MemoryRegion for address translation + pub enable_address_translation: bool, /// Enables gaps in VM address space between the stack frames pub enable_stack_frame_gaps: bool, /// Maximal pc distance after which a new instruction meter validation is emitted by the JIT @@ -255,6 +257,7 @@ impl Default for Config { Self { max_call_depth: 20, stack_frame_size: 4_096, + enable_address_translation: true, enable_stack_frame_gaps: true, instruction_meter_checkpoint_distance: 10000, enable_instruction_meter: true, @@ -500,7 +503,7 @@ impl<'a, V: Verifier, C: ContextObject> EbpfVm<'a, V, C> { pub fn new( executable: &'a Executable<V, C>, context_object: &'a mut C, - memory_mapping: MemoryMapping<'a>, + mut memory_mapping: MemoryMapping<'a>, stack_len: usize, ) -> EbpfVm<'a, V, C> { let config = executable.get_config(); @@ -511,6 +514,9 @@ impl<'a, V: Verifier, C: ContextObject> EbpfVm<'a, V, C> { // within a frame the stack grows down, but frames are ascending config.stack_frame_size } as u64); + if !config.enable_address_translation { + memory_mapping = 
MemoryMapping::new_identity(); + } EbpfVm { executable, #[cfg(feature = "debugger")]