From 709167719f339cefa01d7b9915128a51a458082d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 23 Jul 2021 13:41:59 +0200 Subject: [PATCH] Feature/address translation cleanup (#196) * Concatenates all read-only sections, including the text section, into one. * Enforces exactly one heap memory region per VM. * Renames frames => stack. * Enforces memory regions' virtual addresses to be aligned. * Makes address translation constant-time. * Adds an explicit empty NULL region to avoid an index shift. * Implements JIT address translation in x86 directly (no more Rust calls). * Fixes benchmarks. * Makes stack frame gaps in vm address space optional / configurable. --- benches/jit_compile.rs | 3 +- benches/memory_mapping.rs | 90 +++++++++++++++------------- benches/vm_execution.rs | 8 ++- examples/uptime.rs | 6 +- src/assembler.rs | 2 +- src/call_frames.rs | 80 ++++++++++++++++--------- src/ebpf.rs | 23 ++----- src/elf.rs | 87 ++++++++++++++------------- src/error.rs | 6 +- src/jit.rs | 123 +++++++++++++++++++++++--------------- src/memory_region.rs | 88 ++++++++++----------------- src/static_analysis.rs | 2 +- src/syscalls.rs | 22 +++---- src/vm.rs | 77 ++++++++++-------------- tests/assembler.rs | 4 +- tests/misc.rs | 2 +- tests/ubpf_execution.rs | 12 ++-- tests/ubpf_verifier.rs | 4 +- 18 files changed, 327 insertions(+), 312 deletions(-) diff --git a/benches/jit_compile.rs b/benches/jit_compile.rs index 1fcc1b50..fca74a5d 100644 --- a/benches/jit_compile.rs +++ b/benches/jit_compile.rs @@ -29,7 +29,8 @@ fn bench_init_vm(bencher: &mut Bencher) { ) .unwrap(); bencher.iter(|| { - EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &[]).unwrap() + EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &mut []) .unwrap() }); } diff --git a/benches/memory_mapping.rs b/benches/memory_mapping.rs index 8dfef3dc..5de35fb5 100644 --- a/benches/memory_mapping.rs +++ b/benches/memory_mapping.rs @@ -23,8 +23,9 @@ fn generate_memory_regions( is_writable: bool, mut prng: Option<&mut SmallRng>, ) -> (Vec<MemoryRegion>, u64) { - let mut memory_regions = Vec::with_capacity(entries); - let mut offset = 0; + let mut memory_regions = Vec::with_capacity(entries + 1); + memory_regions.push(MemoryRegion::default()); + let mut offset = 0x100000000; for _ in 0..entries { let length = match &mut prng { Some(prng) => (*prng).gen::<u8>() as u64 + 4, @@ -37,7 +38,7 @@ 0, is_writable, )); - offset += length; + offset += 0x100000000; } (memory_regions, offset) } @@ -59,12 +60,10 @@ fn bench_gapped_randomized_access_with_1024_entries(bencher: &mut Bencher) { let frame_size: u64 = 2; let frame_count: u64 = 1024; let content = vec![0; (frame_size * frame_count * 2) as usize]; - let memory_regions = vec![MemoryRegion::new_from_slice( - &content[..], - 0, - frame_size, - false, - )]; + let memory_regions = vec![ + MemoryRegion::default(), + MemoryRegion::new_from_slice(&content[..], 0x100000000, frame_size, false), + ]; let config = Config::default(); let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap(); let mut prng = new_prng!(); bencher.iter(|| { assert!(memory_mapping .map::<UserError>( AccessType::Load, - prng.gen::<u64>() % frame_count * (frame_size * 2), + 0x100000000 + (prng.gen::<u64>() % frame_count * (frame_size * 2)), 1 ) .is_ok()); }); } @@ -82,18 +81,19 @@ #[bench] fn bench_randomized_access_with_0001_entry(bencher: &mut Bencher) { let content
= vec![0; 1024 * 2]; - let memory_regions = vec![MemoryRegion::new_from_slice(&content[..], 0, 0, false)]; + let memory_regions = vec![ + MemoryRegion::default(), + MemoryRegion::new_from_slice(&content[..], 0x100000000, 0, false), + ]; let config = Config::default(); let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); let mut prng = new_prng!(); bencher.iter(|| { - assert!(memory_mapping - .map::( - AccessType::Load, - prng.gen::() % content.len() as u64, - 1 - ) - .is_ok()); + let _ = memory_mapping.map::( + AccessType::Load, + 0x100000000 + (prng.gen::() % content.len() as u64), + 1, + ); }); } @@ -104,9 +104,11 @@ fn bench_randomized_mapping_access_with_0004_entries(bencher: &mut Bencher) { let config = Config::default(); let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); bencher.iter(|| { - assert!(memory_mapping - .map::(AccessType::Load, prng.gen::() % end_address, 1) - .is_ok()); + let _ = memory_mapping.map::( + AccessType::Load, + 0x100000000 + (prng.gen::() % end_address), + 1, + ); }); } @@ -117,9 +119,11 @@ fn bench_randomized_mapping_access_with_0016_entries(bencher: &mut Bencher) { let config = Config::default(); let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); bencher.iter(|| { - assert!(memory_mapping - .map::(AccessType::Load, prng.gen::() % end_address, 1) - .is_ok()); + let _ = memory_mapping.map::( + AccessType::Load, + 0x100000000 + (prng.gen::() % end_address), + 1, + ); }); } @@ -130,9 +134,11 @@ fn bench_randomized_mapping_access_with_0064_entries(bencher: &mut Bencher) { let config = Config::default(); let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); bencher.iter(|| { - assert!(memory_mapping - .map::(AccessType::Load, prng.gen::() % end_address, 1) - .is_ok()); + let _ = memory_mapping.map::( + AccessType::Load, + 0x100000000 + (prng.gen::() % end_address), + 1, + ); }); } @@ -143,9 +149,11 @@ fn bench_randomized_mapping_access_with_0256_entries(bencher: &mut Bencher) { let config = Config::default(); let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); bencher.iter(|| { - assert!(memory_mapping - .map::(AccessType::Load, prng.gen::() % end_address, 1) - .is_ok()); + let _ = memory_mapping.map::( + AccessType::Load, + 0x100000000 + (prng.gen::() % end_address), + 1, + ); }); } @@ -156,9 +164,11 @@ fn bench_randomized_mapping_access_with_1024_entries(bencher: &mut Bencher) { let config = Config::default(); let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); bencher.iter(|| { - assert!(memory_mapping - .map::(AccessType::Load, prng.gen::() % end_address, 1) - .is_ok()); + let _ = memory_mapping.map::( + AccessType::Load, + 0x100000000 + (prng.gen::() % end_address), + 1, + ); }); } @@ -169,9 +179,11 @@ fn bench_randomized_access_with_1024_entries(bencher: &mut Bencher) { let config = Config::default(); let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); bencher.iter(|| { - assert!(memory_mapping - .map::(AccessType::Load, prng.gen::() % end_address, 1) - .is_ok()); + let _ = memory_mapping.map::( + AccessType::Load, + 0x100000000 + (prng.gen::() % end_address), + 1, + ); }); } @@ -182,9 +194,7 @@ fn bench_randomized_mapping_with_1024_entries(bencher: &mut Bencher) { let config = Config::default(); let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); bencher.iter(|| { - assert!(memory_mapping - .map::(AccessType::Load, 0, 1) - .is_ok()); + let _ = 
memory_mapping.map::(AccessType::Load, 0x100000000, 1); }); } @@ -195,7 +205,7 @@ fn bench_mapping_with_1024_entries(bencher: &mut Bencher) { let memory_mapping = MemoryMapping::new::(memory_regions, &config).unwrap(); bencher.iter(|| { assert!(memory_mapping - .map::(AccessType::Load, 0, 1) + .map::(AccessType::Load, 0x100000000, 1) .is_ok()); }); } diff --git a/benches/vm_execution.rs b/benches/vm_execution.rs index 9f1300a4..583b65e2 100644 --- a/benches/vm_execution.rs +++ b/benches/vm_execution.rs @@ -29,7 +29,8 @@ fn bench_init_interpreter_execution(bencher: &mut Bencher) { ) .unwrap(); let mut vm = - EbpfVm::::new(executable.as_ref(), &mut [], &[]).unwrap(); + EbpfVm::::new(executable.as_ref(), &mut [], &mut []) + .unwrap(); bencher.iter(|| { vm.execute_program_interpreted(&mut TestInstructionMeter { remaining: 29 }) .unwrap() @@ -51,7 +52,8 @@ fn bench_init_jit_execution(bencher: &mut Bencher) { .unwrap(); executable.jit_compile().unwrap(); let mut vm = - EbpfVm::::new(executable.as_ref(), &mut [], &[]).unwrap(); + EbpfVm::::new(executable.as_ref(), &mut [], &mut []) + .unwrap(); bencher.iter(|| { vm.execute_program_jit(&mut TestInstructionMeter { remaining: 29 }) .unwrap() @@ -73,7 +75,7 @@ fn bench_jit_vs_interpreter( ) .unwrap(); executable.jit_compile().unwrap(); - let mut vm = EbpfVm::new(executable.as_ref(), mem, &[]).unwrap(); + let mut vm = EbpfVm::new(executable.as_ref(), &mut [], mem).unwrap(); let interpreter_summary = bencher .bench(|bencher| { bencher.iter(|| { diff --git a/examples/uptime.rs b/examples/uptime.rs index d37f4c0b..e6c29299 100644 --- a/examples/uptime.rs +++ b/examples/uptime.rs @@ -52,7 +52,8 @@ fn main() { ) .unwrap(); let mut vm = - EbpfVm::::new(executable.as_ref(), &mut [], &[]).unwrap(); + EbpfVm::::new(executable.as_ref(), &mut [], &mut []) + .unwrap(); // Execute prog1. assert_eq!( vm.execute_program_interpreted(&mut TestInstructionMeter { remaining: 5 }) @@ -88,7 +89,8 @@ fn main() { executable.jit_compile().unwrap(); } let mut vm = - EbpfVm::::new(executable.as_ref(), &mut [], &[]).unwrap(); + EbpfVm::::new(executable.as_ref(), &mut [], &mut []) + .unwrap(); vm.bind_syscall_context_object(Box::new(syscalls::BpfTimeGetNs {}), None) .unwrap(); diff --git a/src/assembler.rs b/src/assembler.rs index de6af9ec..2ca90a04 100644 --- a/src/assembler.rs +++ b/src/assembler.rs @@ -190,7 +190,7 @@ fn insn(opc: u8, dst: i64, src: i64, off: i64, imm: i64) -> Result /// Config::default(), /// SyscallRegistry::default(), /// ).unwrap(); -/// let program = executable.get_text_bytes().unwrap().1; +/// let program = executable.get_text_bytes().1; /// println!("{:?}", program); /// # assert_eq!(program, /// # &[0x07, 0x01, 0x00, 0x00, 0x05, 0x06, 0x00, 0x00, diff --git a/src/call_frames.rs b/src/call_frames.rs index 69b35189..b81fa029 100644 --- a/src/call_frames.rs +++ b/src/call_frames.rs @@ -5,6 +5,7 @@ use crate::{ ebpf::{ELF_INSN_DUMP_OFFSET, HOST_ALIGN, MM_STACK_START, SCRATCH_REGS}, error::{EbpfError, UserDefinedError}, memory_region::MemoryRegion, + vm::Config, }; /// One call frame @@ -19,23 +20,24 @@ struct CallFrame { /// function to be called in its own frame. 
CallFrames manages /// call frames #[derive(Clone, Debug)] -pub struct CallFrames { +pub struct CallFrames<'a> { + config: &'a Config, stack: AlignedMemory, - region: MemoryRegion, frame_index: usize, frame_index_max: usize, frames: Vec<CallFrame>, } -impl CallFrames { +impl<'a> CallFrames<'a> { /// New call frame, depth indicates maximum call depth - pub fn new(depth: usize, frame_size: usize) -> Self { - let mut stack = AlignedMemory::new(depth * frame_size, HOST_ALIGN); - stack.resize(depth * frame_size, 0).unwrap(); - let region = - MemoryRegion::new_from_slice(stack.as_slice(), MM_STACK_START, frame_size as u64, true); + pub fn new(config: &'a Config) -> Self { + let mut stack = + AlignedMemory::new(config.max_call_depth * config.stack_frame_size, HOST_ALIGN); + stack + .resize(config.max_call_depth * config.stack_frame_size, 0) + .unwrap(); let mut frames = CallFrames { + config, stack, - region, frame_index: 0, frame_index_max: 0, frames: vec![ @@ -44,19 +46,30 @@ impl CallFrames { saved_reg: [0u64; SCRATCH_REGS], return_ptr: 0 }; - depth + config.max_call_depth ], }; - for i in 0..depth { - // Seperate each stack frame's virtual address so that stack over/under-run is caught explicitly - frames.frames[i].vm_addr = MM_STACK_START + (i * 2 * frame_size) as u64; + // Separate each stack frame's virtual address so that stack over/under-run is caught explicitly + let gap_factor = if config.enable_stack_frame_gaps { 2 } else { 1 }; + for i in 0..config.max_call_depth { + frames.frames[i].vm_addr = + MM_STACK_START + (i * gap_factor * config.stack_frame_size) as u64; } frames } /// Get stack memory region - pub fn get_region(&self) -> &MemoryRegion { - &self.region + pub fn get_memory_region(&self) -> MemoryRegion { + MemoryRegion::new_from_slice( + self.stack.as_slice(), + MM_STACK_START, + if self.config.enable_stack_frame_gaps { + self.config.stack_frame_size as u64 + } else { + 0 + }, + true, + ) } /// Get the vm address of the beginning of each stack frame @@ -66,7 +79,7 @@ /// Get the address of a frame's top of stack pub fn get_stack_top(&self) -> u64 { - self.frames[self.frame_index].vm_addr + (1 << self.region.vm_gap_shift) + self.frames[self.frame_index].vm_addr + self.config.stack_frame_size as u64 } /// Get current call frame index, 0 is the root frame @@ -121,32 +134,41 @@ mod tests { #[test] fn test_frames() { - const DEPTH: usize = 10; - const FRAME_SIZE: u64 = 8; - let mut frames = CallFrames::new(DEPTH, FRAME_SIZE as usize); + let config = Config { + max_call_depth: 10, + stack_frame_size: 8, + enable_stack_frame_gaps: true, + ..Config::default() + }; + let mut frames = CallFrames::new(&config); let mut ptrs: Vec<u64> = Vec::new(); - for i in 0..DEPTH - 1 { - let registers = vec![i as u64; FRAME_SIZE as usize]; + for i in 0..config.max_call_depth - 1 { + let registers = vec![i as u64; config.stack_frame_size]; assert_eq!(frames.get_frame_index(), i); ptrs.push(frames.get_frame_pointers()[i]); let top = frames.push::<UserError>(&registers[0..4], i).unwrap(); let new_ptrs = frames.get_frame_pointers(); - assert_eq!(top, new_ptrs[i + 1] + FRAME_SIZE); - assert_ne!(top, ptrs[i] + FRAME_SIZE - 1); - assert!(!(ptrs[i] <= new_ptrs[i + 1] && new_ptrs[i + 1] < ptrs[i] + FRAME_SIZE)); + assert_eq!(top, new_ptrs[i + 1] + config.stack_frame_size as u64); + assert_ne!(top, ptrs[i] + config.stack_frame_size as u64 - 1); + assert!( + !(ptrs[i] <= new_ptrs[i + 1] + && new_ptrs[i + 1] < ptrs[i] + config.stack_frame_size as u64) + ); } - let i = DEPTH - 1; - let registers = vec![i as u64; FRAME_SIZE
as usize]; + let i = config.max_call_depth - 1; + let registers = vec![i as u64; config.stack_frame_size]; assert_eq!(frames.get_frame_index(), i); ptrs.push(frames.get_frame_pointers()[i]); - assert!(frames.push::<UserError>(&registers, DEPTH - 1).is_err()); + assert!(frames + .push::<UserError>(&registers, config.max_call_depth - 1) + .is_err()); - for i in (0..DEPTH - 1).rev() { + for i in (0..config.max_call_depth - 1).rev() { let (saved_reg, stack_ptr, return_ptr) = frames.pop::<UserError>().unwrap(); assert_eq!(saved_reg, [i as u64, i as u64, i as u64, i as u64]); - assert_eq!(ptrs[i] + FRAME_SIZE, stack_ptr); + assert_eq!(ptrs[i] + config.stack_frame_size as u64, stack_ptr); assert_eq!(i, return_ptr); } diff --git a/src/ebpf.rs b/src/ebpf.rs index c92f6a3d..7d5b5d09 100644 --- a/src/ebpf.rs +++ b/src/ebpf.rs @@ -35,21 +35,13 @@ pub const SCRATCH_REGS: usize = 4; /// Instruction numbers typically start at 29 in the ELF dump, use this offset /// when reporting so that trace aligns with the dump. pub const ELF_INSN_DUMP_OFFSET: usize = 29; +/// Alignment of the memory regions in host address space in bytes +pub const HOST_ALIGN: usize = 16; +/// Upper half of a pointer is the region index, lower half the virtual address inside that region. +pub const VIRTUAL_ADDRESS_BITS: usize = 32; -// Memory map -// +-----------------+ -// | Program | -// +-----------------+ -// | Stack | -// +-----------------+ -// | Heap | -// +-----------------+ -// | Input | -// +-----------------+ -// The values below provide sufficient separations between the map areas. Avoid using -// 0x0 to distinguish virtual addresses from null pointers. -// Note: Compiled programs themselves have no direct dependency on these values so -// they may be modified based on new requirements. +// Memory map regions' virtual addresses need to be (1 << VIRTUAL_ADDRESS_BITS) bytes apart. +// Also the region at index 0 should be skipped to catch NULL ptr accesses. /// Start of the program bits (text and ro segments) in the memory map pub const MM_PROGRAM_START: u64 = 0x100000000; @@ -60,9 +52,6 @@ pub const MM_HEAP_START: u64 = 0x300000000; /// Start of the input buffers in the memory map pub const MM_INPUT_START: u64 = 0x400000000; -/// Alignment of the memory regions in host address space in bytes -pub const HOST_ALIGN: usize = 16; // eBPF op codes.
// See also https://www.kernel.org/doc/Documentation/networking/filter.txt diff --git a/src/elf.rs b/src/elf.rs index 2a92581b..b5a86373 100644 --- a/src/elf.rs +++ b/src/elf.rs @@ -210,10 +210,10 @@ pub struct EBpfElf { config: Config, /// Loaded and executable elf elf_bytes: AlignedMemory, + /// Read-only section + ro_section: Vec, /// Text section info text_section_info: SectionInfo, - /// Read-only section info - ro_section_infos: Vec, /// Call resolution map (hash, pc, name) bpf_functions: BTreeMap, /// Syscall symbol map (hash, name) @@ -231,30 +231,17 @@ impl Executable for EBpfElf Result<(u64, &[u8]), EbpfError> { - Ok(( + fn get_text_bytes(&self) -> (u64, &[u8]) { + let offset = (self.text_section_info.vaddr - ebpf::MM_PROGRAM_START) as usize; + ( self.text_section_info.vaddr, - self.elf_bytes - .as_slice() - .get(self.text_section_info.offset_range.clone()) - .ok_or(ElfError::OutOfBounds)?, - )) + &self.ro_section[offset..offset + self.text_section_info.offset_range.len()], + ) } - /// Get a vector of virtual addresses for each read-only section - fn get_ro_sections(&self) -> Result, EbpfError> { - self.ro_section_infos - .iter() - .map(|section_info| { - Ok(( - section_info.vaddr, - self.elf_bytes - .as_slice() - .get(section_info.offset_range.clone()) - .ok_or(ElfError::OutOfBounds)?, - )) - }) - .collect::, EbpfError>>() + /// Get the concatenated read-only sections (including the text section) + fn get_ro_section(&self) -> &[u8] { + self.ro_section.as_slice() } /// Get the entry point offset into the text section @@ -347,6 +334,7 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EBpfElf { Self { config, elf_bytes, + ro_section: text_bytes.to_vec(), text_section_info: SectionInfo { name: ".text".to_string(), vaddr: ebpf::MM_PROGRAM_START, @@ -355,7 +343,6 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EBpfElf { end: text_bytes.len(), }, }, - ro_section_infos: vec![], bpf_functions, syscall_symbols: BTreeMap::default(), syscall_registry, @@ -410,36 +397,50 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EBpfElf { bpf_functions.remove(&ebpf::hash_symbol_name(b"entrypoint")); register_bpf_function(&mut bpf_functions, entrypoint, "entrypoint")?; - // calculate the read-only section infos - let ro_section_infos = elf + // concatenate the read-only sections into one + let mut ro_length = text_section.sh_addr as usize + text_section_info.offset_range.len(); + let ro_slices = elf .section_headers .iter() - .filter_map(|section_header| { + .filter(|section_header| { if let Some(name) = elf.shdr_strtab.get_at(section_header.sh_name) { - if name == ".rodata" || name == ".data.rel.ro" || name == ".eh_frame" { - return Some(SectionInfo { - name: name.to_string(), - vaddr: section_header - .sh_addr - .saturating_add(ebpf::MM_PROGRAM_START), - offset_range: section_header.file_range().unwrap_or_default(), - }); - } + return name == ".rodata" || name == ".data.rel.ro" || name == ".eh_frame"; } - None + false }) - .collect::>(); - for ro_section_info in ro_section_infos.iter() { - if ro_section_info.vaddr > ebpf::MM_STACK_START { - return Err(ElfError::OutOfBounds); - } + .map(|section_header| { + let vaddr = section_header + .sh_addr + .saturating_add(ebpf::MM_PROGRAM_START); + if vaddr > ebpf::MM_STACK_START { + return Err(ElfError::OutOfBounds); + } + let slice = elf_bytes + .as_slice() + .get(section_header.file_range().unwrap_or_default()) + .ok_or(ElfError::OutOfBounds)?; + ro_length = ro_length.max(section_header.sh_addr as usize + slice.len()); + 
Ok((section_header.sh_addr as usize, slice)) + }) + .collect::, ElfError>>()?; + let mut ro_section = vec![0; ro_length]; + ro_section[text_section.sh_addr as usize + ..text_section.sh_addr as usize + text_section_info.offset_range.len()] + .copy_from_slice( + elf_bytes + .as_slice() + .get(text_section_info.offset_range.clone()) + .ok_or(ElfError::OutOfBounds)?, + ); + for (offset, slice) in ro_slices.iter() { + ro_section[*offset..*offset + slice.len()].copy_from_slice(slice); } Ok(Self { config, elf_bytes, + ro_section, text_section_info, - ro_section_infos, bpf_functions, syscall_symbols, syscall_registry, diff --git a/src/error.rs b/src/error.rs index 49788de9..7575bc6a 100644 --- a/src/error.rs +++ b/src/error.rs @@ -66,9 +66,9 @@ pub enum EbpfError { /// Invalid virtual address #[error("invalid virtual address {0:x?}")] InvalidVirtualAddress(u64), - /// Virtual address overlap - #[error("virtual address overlap {0:x?}")] - VirtualAddressOverlap(u64), + /// Memory region index or virtual address space is invalid + #[error("Invalid memory region at index {0}")] + InvalidMemoryRegion(usize), /// Access violation (general) #[error("Access violation in {4} section at address {2:#x} of size {3:?} by instruction #{0}")] AccessViolation(usize, AccessType, u64, u64, &'static str), diff --git a/src/jit.rs b/src/jit.rs index 26d284a4..002f2f37 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -26,7 +26,7 @@ use crate::{ vm::{Config, Executable, ProgramResult, InstructionMeter, Tracer, DynTraitFatPointer, SYSCALL_CONTEXT_OBJECTS_OFFSET, REPORT_UNRESOLVED_SYMBOL_INDEX}, ebpf::{self, INSN_SIZE, FIRST_SCRATCH_REG, SCRATCH_REGS, STACK_REG, MM_STACK_START}, error::{UserDefinedError, EbpfError}, - memory_region::{AccessType, MemoryMapping}, + memory_region::{AccessType, MemoryMapping, MemoryRegion}, user_error::UserError, x86::*, }; @@ -146,7 +146,7 @@ impl PartialEq for JitProgram { impl JitProgram { pub fn new(executable: &dyn Executable) -> Result> { - let program = executable.get_text_bytes()?.1; + let program = executable.get_text_bytes().1; let mut jit = JitCompiler::new::(program, executable.get_config())?; jit.compile::(executable)?; let main = unsafe { mem::transmute(jit.result.text_section.as_ptr()) }; @@ -561,7 +561,7 @@ fn emit_bpf_call(jit: &mut JitCompiler, dst: Value, number_ emit_alu(jit, OperandSize::S64, 0xc1, 5, REGISTER_MAP[STACK_REG], shift_amount as i64, None)?; X86Instruction::push(REGISTER_MAP[STACK_REG]).emit(jit)?; } - // Load host target_address from JitProgramArgument.instruction_addresses + // Load host target_address from jit_program_argument.instruction_addresses debug_assert_eq!(INSN_SIZE, 8); // Because the instruction size is also the slot size we do not need to shift the offset X86Instruction::mov(OperandSize::S64, REGISTER_MAP[0], REGISTER_MAP[STACK_REG]).emit(jit)?; X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[STACK_REG], jit.result.pc_section.as_ptr() as i64).emit(jit)?; @@ -575,13 +575,13 @@ fn emit_bpf_call(jit: &mut JitCompiler, dst: Value, number_ } } + let stack_frame_size = jit.config.stack_frame_size as i64 * if jit.config.enable_stack_frame_gaps { 2 } else { 1 }; X86Instruction::load(OperandSize::S64, RBP, REGISTER_MAP[STACK_REG], X86IndirectAccess::Offset(slot_on_environment_stack(jit, EnvironmentStackSlot::BpfStackPtr))).emit(jit)?; - emit_alu(jit, OperandSize::S64, 0x81, 4, REGISTER_MAP[STACK_REG], !(jit.config.stack_frame_size as i64 * 2 - 1), None)?; // stack_ptr &= !(jit.config.stack_frame_size * 2 - 1); - emit_alu(jit, OperandSize::S64, 
0x81, 0, REGISTER_MAP[STACK_REG], jit.config.stack_frame_size as i64 * 3, None)?; // stack_ptr += jit.config.stack_frame_size * 3; + emit_alu(jit, OperandSize::S64, 0x81, 0, REGISTER_MAP[STACK_REG], stack_frame_size, None)?; // stack_ptr += stack_frame_size; X86Instruction::store(OperandSize::S64, REGISTER_MAP[STACK_REG], RBP, X86IndirectAccess::Offset(slot_on_environment_stack(jit, EnvironmentStackSlot::BpfStackPtr))).emit(jit)?; - // if(stack_ptr >= MM_STACK_START + jit.config.max_call_depth * jit.config.stack_frame_size * 2) throw EbpfError::CallDepthExeeded; - X86Instruction::load_immediate(OperandSize::S64, R11, MM_STACK_START as i64 + (jit.config.max_call_depth * jit.config.stack_frame_size * 2) as i64).emit(jit)?; + // if(stack_ptr >= MM_STACK_START + jit.config.stack_frame_size + jit.config.max_call_depth * stack_frame_size) throw EbpfError::CallDepthExceeded; + X86Instruction::load_immediate(OperandSize::S64, R11, MM_STACK_START as i64 + jit.config.stack_frame_size as i64 + (jit.config.max_call_depth as i64 * stack_frame_size)).emit(jit)?; X86Instruction::cmp(OperandSize::S64, R11, REGISTER_MAP[STACK_REG], None).emit(jit)?; // Store PC in case the bounds check fails X86Instruction::load_immediate(OperandSize::S64, R11, jit.pc as i64).emit(jit)?; @@ -806,11 +806,7 @@ fn emit_muldivmod(jit: &mut JitCompiler, opc: u8, src: u8, if (div || modrm) && imm.is_none() { // Save pc X86Instruction::load_immediate(OperandSize::S64, R11, jit.pc as i64).emit(jit)?; - - // test src,src - emit_alu(jit, size, 0x85, src, src, 0, None)?; - - // Jump if src is zero + X86Instruction::test(size, src, src, None).emit(jit)?; // src == 0 emit_jcc(jit, 0x84, TARGET_PC_DIV_BY_ZERO)?; } @@ -955,7 +951,7 @@ impl JitCompiler { }; } - let mut code_length_estimate = pc * 256 + 512; + let mut code_length_estimate = pc * 256 + 4096; code_length_estimate += (code_length_estimate as f64 * _config.noop_instruction_ratio) as usize; let mut rng = rand::thread_rng(); let (environment_stack_key, program_argument_key) = @@ -982,7 +978,7 @@ impl JitCompiler { fn compile(&mut self, executable: &dyn Executable) -> Result<(), EbpfError> { - let (program_vm_addr, program) = executable.get_text_bytes()?; + let (program_vm_addr, program) = executable.get_text_bytes(); self.program_vm_addr = program_vm_addr; self.generate_prologue::()?; @@ -1269,7 +1265,7 @@ impl JitCompiler { X86Instruction::load(OperandSize::S64, R10, RAX, X86IndirectAccess::Offset((SYSCALL_CONTEXT_OBJECTS_OFFSET + syscall.context_object_slot) as i32 * 8 + self.program_argument_key)).emit(self)?; emit_rust_call(self, syscall.function as *const u8, &[ Argument { index: 7, value: Value::RegisterIndirect(RBP, slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr), false) }, - Argument { index: 6, value: Value::RegisterPlusConstant32(R10, self.program_argument_key, false) }, // JitProgramArgument::memory_mapping + Argument { index: 6, value: Value::RegisterPlusConstant32(R10, self.program_argument_key, false) }, // jit_program_argument.memory_mapping Argument { index: 5, value: Value::Register(ARGUMENT_REGISTERS[5]) }, Argument { index: 4, value: Value::Register(ARGUMENT_REGISTERS[4]) }, Argument { index: 3, value: Value::Register(ARGUMENT_REGISTERS[3]) }, @@ -1317,16 +1313,14 @@ impl JitCompiler { ebpf::EXIT => { emit_validate_and_profile_instruction_count(self, true, Some(0))?; + let stack_frame_size = self.config.stack_frame_size as i64 * if self.config.enable_stack_frame_gaps { 2 } else { 1 }; X86Instruction::load(OperandSize::S64, RBP,
REGISTER_MAP[STACK_REG], X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::BpfStackPtr))).emit(self)?; - emit_alu(self, OperandSize::S64, 0x81, 4, REGISTER_MAP[STACK_REG], !(self.config.stack_frame_size as i64 * 2 - 1), None)?; // stack_ptr &= !(jit.config.stack_frame_size * 2 - 1); - emit_alu(self, OperandSize::S64, 0x81, 5, REGISTER_MAP[STACK_REG], self.config.stack_frame_size as i64 * 2, None)?; // stack_ptr -= jit.config.stack_frame_size * 2; + emit_alu(self, OperandSize::S64, 0x81, 5, REGISTER_MAP[STACK_REG], stack_frame_size, None)?; // stack_ptr -= stack_frame_size; X86Instruction::store(OperandSize::S64, REGISTER_MAP[STACK_REG], RBP, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::BpfStackPtr))).emit(self)?; - // if(stack_ptr < MM_STACK_START) goto exit; - X86Instruction::mov(OperandSize::S64, REGISTER_MAP[0], R11).emit(self)?; - X86Instruction::load_immediate(OperandSize::S64, REGISTER_MAP[0], MM_STACK_START as i64).emit(self)?; - X86Instruction::cmp(OperandSize::S64, REGISTER_MAP[0], REGISTER_MAP[STACK_REG], None).emit(self)?; - X86Instruction::mov(OperandSize::S64, R11, REGISTER_MAP[0]).emit(self)?; + // if(stack_ptr < MM_STACK_START + self.config.stack_frame_size) goto exit; + X86Instruction::load_immediate(OperandSize::S64, R11, MM_STACK_START as i64 + self.config.stack_frame_size as i64).emit(self)?; + X86Instruction::cmp(OperandSize::S64, R11, REGISTER_MAP[STACK_REG], None).emit(self)?; emit_jcc(self, 0x82, TARGET_PC_EXIT)?; // else return; @@ -1397,48 +1391,79 @@ impl JitCompiler { // Translates a vm memory address to a host memory address for (access_type, len) in &[ - (AccessType::Load, 1i64), - (AccessType::Load, 2i64), - (AccessType::Load, 4i64), - (AccessType::Load, 8i64), - (AccessType::Store, 1i64), - (AccessType::Store, 2i64), - (AccessType::Store, 4i64), - (AccessType::Store, 8i64), + (AccessType::Load, 1i32), + (AccessType::Load, 2i32), + (AccessType::Load, 4i32), + (AccessType::Load, 8i32), + (AccessType::Store, 1i32), + (AccessType::Store, 2i32), + (AccessType::Store, 4i32), + (AccessType::Store, 8i32), ] { let target_offset = len.trailing_zeros() as usize + 4 * (*access_type as usize); set_anchor(self, TARGET_PC_TRANSLATE_MEMORY_ADDRESS + target_offset); X86Instruction::push(R11).emit(self)?; - emit_rust_call(self, MemoryMapping::map:: as *const u8, &[ - Argument { index: 3, value: Value::Register(R11) }, // Specify first as the src register could be overwritten by other arguments - Argument { index: 4, value: Value::Constant64(*len, false) }, - Argument { index: 2, value: Value::Constant64(*access_type as i64, false) }, - Argument { index: 1, value: Value::RegisterPlusConstant32(R10, self.program_argument_key, false) }, // JitProgramArgument::memory_mapping - Argument { index: 0, value: Value::RegisterIndirect(RBP, slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr), false) }, // Pointer to optional typed return value - ], None, true)?; - - // Throw error if the result indicates one - emit_jcc(self, 0x85, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset)?; - - // Store Ok value in result register - X86Instruction::load(OperandSize::S64, RBP, R11, X86IndirectAccess::Offset(slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr))).emit(self)?; - X86Instruction::load(OperandSize::S64, R11, R11, X86IndirectAccess::Offset(8)).emit(self)?; + X86Instruction::push(RAX).emit(self)?; + X86Instruction::push(RCX).emit(self)?; + let stack_offset = if 
self.config.enable_stack_frame_gaps { + X86Instruction::push(RDX).emit(self)?; + 24 + } else { + 16 + }; + X86Instruction::mov(OperandSize::S64, R11, RAX).emit(self)?; // RAX = vm_addr; + emit_alu(self, OperandSize::S64, 0xc1, 5, RAX, ebpf::VIRTUAL_ADDRESS_BITS as i64, None)?; // RAX >>= ebpf::VIRTUAL_ADDRESS_BITS; + X86Instruction::cmp(OperandSize::S64, RAX, R10, Some(X86IndirectAccess::Offset(self.program_argument_key + 8))).emit(self)?; // region_index >= jit_program_argument.memory_mapping.regions.len() + emit_jcc(self, 0x86, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset)?; + debug_assert_eq!(1 << 5, std::mem::size_of::<MemoryRegion>()); + emit_alu(self, OperandSize::S64, 0xc1, 4, RAX, 5, None)?; // RAX *= std::mem::size_of::<MemoryRegion>(); + emit_alu(self, OperandSize::S64, 0x03, RAX, R10, 0, Some(X86IndirectAccess::Offset(self.program_argument_key)))?; // region = &jit_program_argument.memory_mapping.regions[region_index]; + if *access_type == AccessType::Store { + X86Instruction::cmp_immediate(OperandSize::S8, RAX, 0, Some(X86IndirectAccess::Offset(25))).emit(self)?; // region.is_writable == 0 + emit_jcc(self, 0x84, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset)?; + } + X86Instruction::load_immediate(OperandSize::S64, RCX, (1i64 << ebpf::VIRTUAL_ADDRESS_BITS) - 1).emit(self)?; // RCX = (1 << ebpf::VIRTUAL_ADDRESS_BITS) - 1; + emit_alu(self, OperandSize::S64, 0x21, RCX, R11, 0, None)?; // R11 &= (1 << ebpf::VIRTUAL_ADDRESS_BITS) - 1; + if self.config.enable_stack_frame_gaps { + X86Instruction::load(OperandSize::S8, RAX, RCX, X86IndirectAccess::Offset(24)).emit(self)?; // RCX = region.vm_gap_shift; + X86Instruction::mov(OperandSize::S64, R11, RDX).emit(self)?; // RDX = R11; + emit_alu(self, OperandSize::S64, 0xd3, 5, RDX, 0, None)?; // RDX = R11 >> region.vm_gap_shift; + X86Instruction::test_immediate(OperandSize::S64, RDX, 1, None).emit(self)?; // (RDX & 1) != 0 + emit_jcc(self, 0x85, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset)?; + X86Instruction::load_immediate(OperandSize::S64, RDX, -1).emit(self)?; // RDX = -1; + emit_alu(self, OperandSize::S64, 0xd3, 4, RDX, 0, None)?; // gap_mask = -1 << region.vm_gap_shift; + X86Instruction::mov(OperandSize::S64, RDX, RCX).emit(self)?; // RCX = RDX; + emit_alu(self, OperandSize::S64, 0xf7, 2, RCX, 0, None)?; // inverse_gap_mask = !gap_mask; + emit_alu(self, OperandSize::S64, 0x21, R11, RCX, 0, None)?; // below_gap = R11 & inverse_gap_mask; + emit_alu(self, OperandSize::S64, 0x21, RDX, R11, 0, None)?; // above_gap = R11 & gap_mask; + emit_alu(self, OperandSize::S64, 0xc1, 5, R11, 1, None)?; // above_gap >>= 1; + emit_alu(self, OperandSize::S64, 0x09, RCX, R11, 0, None)?; // gapped_offset = above_gap | below_gap; + } + X86Instruction::lea(OperandSize::S64, R11, RCX, Some(X86IndirectAccess::Offset(*len))).emit(self)?; // RCX = R11 + len; + X86Instruction::cmp(OperandSize::S8, RCX, RAX, Some(X86IndirectAccess::Offset(16))).emit(self)?; // region.len < R11 + len + emit_jcc(self, 0x82, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset)?; + emit_alu(self, OperandSize::S64, 0x03, R11, RAX, 0, Some(X86IndirectAccess::Offset(0)))?; // R11 += region.host_addr; + if self.config.enable_stack_frame_gaps { + X86Instruction::pop(RDX).emit(self)?; + } + X86Instruction::pop(RCX).emit(self)?; + X86Instruction::pop(RAX).emit(self)?; emit_alu(self, OperandSize::S64, 0x81, 0, RSP, 8, None)?; X86Instruction::return_near().emit(self)?; set_anchor(self, TARGET_PC_MEMORY_ACCESS_VIOLATION + target_offset); emit_alu(self, OperandSize::S64, 0x31, R11, R11, 0, None)?; // R11 = 0; -
X86Instruction::load(OperandSize::S64, RSP, R11, X86IndirectAccess::OffsetIndexShift(0, R11, 0)).emit(self)?; + X86Instruction::load(OperandSize::S64, RSP, R11, X86IndirectAccess::OffsetIndexShift(stack_offset, R11, 0)).emit(self)?; emit_rust_call(self, MemoryMapping::generate_access_violation:: as *const u8, &[ Argument { index: 3, value: Value::Register(R11) }, // Specify first as the src register could be overwritten by other arguments - Argument { index: 4, value: Value::Constant64(*len, false) }, + Argument { index: 4, value: Value::Constant64(*len as i64, false) }, Argument { index: 2, value: Value::Constant64(*access_type as i64, false) }, - Argument { index: 1, value: Value::RegisterPlusConstant32(R10, self.program_argument_key, false) }, // JitProgramArgument::memory_mapping + Argument { index: 1, value: Value::RegisterPlusConstant32(R10, self.program_argument_key, false) }, // jit_program_argument.memory_mapping Argument { index: 0, value: Value::RegisterIndirect(RBP, slot_on_environment_stack(self, EnvironmentStackSlot::OptRetValPtr), false) }, // Pointer to optional typed return value ], None, true)?; - X86Instruction::pop(R11).emit(self)?; - X86Instruction::pop(R11).emit(self)?; + emit_alu(self, OperandSize::S64, 0x81, 0, RSP, stack_offset as i64 + 8, None)?; // Drop R11, RAX, RCX, RDX from stack + X86Instruction::pop(R11).emit(self)?; // Put callers PC in R11 emit_call(self, TARGET_PC_TRANSLATE_PC)?; emit_jmp(self, TARGET_PC_EXCEPTION_AT)?; } diff --git a/src/memory_region.rs b/src/memory_region.rs index ea3a9b8b..6a05e4d5 100644 --- a/src/memory_region.rs +++ b/src/memory_region.rs @@ -23,7 +23,7 @@ pub struct MemoryRegion { } impl MemoryRegion { /// Creates a new MemoryRegion structure from a slice - pub fn new_from_slice(v: &[u8], vm_addr: u64, vm_gap_size: u64, is_writable: bool) -> Self { + pub fn new_from_slice(slice: &[u8], vm_addr: u64, vm_gap_size: u64, is_writable: bool) -> Self { let vm_gap_shift = if vm_gap_size > 0 { let vm_gap_shift = std::mem::size_of::() as u8 * 8 - vm_gap_size.leading_zeros() as u8 - 1; @@ -33,9 +33,9 @@ impl MemoryRegion { std::mem::size_of::() as u8 * 8 - 1 }; MemoryRegion { - host_addr: v.as_ptr() as u64, + host_addr: slice.as_ptr() as u64, vm_addr, - len: v.len() as u64, + len: slice.len() as u64, vm_gap_shift, is_writable, } @@ -48,13 +48,13 @@ impl MemoryRegion { vm_addr: u64, len: u64, ) -> Result> { - let mut begin_offset = vm_addr - self.vm_addr; + let begin_offset = vm_addr - self.vm_addr; let is_in_gap = ((begin_offset >> self.vm_gap_shift as u32) & 1) == 1; - let gap_mask = (1 << self.vm_gap_shift) - 1; - begin_offset = (begin_offset & !gap_mask) >> 1 | (begin_offset & gap_mask); - if let Some(end_offset) = begin_offset.checked_add(len as u64) { + let gap_mask = (-1i64 << self.vm_gap_shift) as u64; + let gapped_offset = (begin_offset & gap_mask) >> 1 | (begin_offset & !gap_mask); + if let Some(end_offset) = gapped_offset.checked_add(len as u64) { if end_offset <= self.len && !is_in_gap { - return Ok(self.host_addr + begin_offset); + return Ok(self.host_addr + gapped_offset); } } Err(EbpfError::InvalidVirtualAddress(vm_addr)) @@ -95,49 +95,31 @@ pub enum AccessType { /// Indirection to use instead of a slice to make handling easier pub struct MemoryMapping<'a> { - /// Mapped (valid) regions + /// Mapped memory regions regions: Box<[MemoryRegion]>, - /// Copy of the regions vm_addr fields to improve cache density - dense_keys: Box<[u64]>, /// VM configuration config: &'a Config, } impl<'a> MemoryMapping<'a> { - fn 
construct_eytzinger_order( - &mut self, - ascending_regions: &[MemoryRegion], - mut in_index: usize, - out_index: usize, - ) -> usize { - if out_index >= self.regions.len() { - return in_index; - } - in_index = self.construct_eytzinger_order(ascending_regions, in_index, 2 * out_index + 1); - self.regions[out_index] = ascending_regions[in_index].clone(); - self.dense_keys[out_index] = ascending_regions[in_index].vm_addr; - self.construct_eytzinger_order(ascending_regions, in_index + 1, 2 * out_index + 2) - } - /// Creates a new MemoryMapping structure from the given regions pub fn new( mut regions: Vec, config: &'a Config, ) -> Result> { - let mut result = Self { - regions: vec![MemoryRegion::default(); regions.len()].into_boxed_slice(), - dense_keys: vec![0; regions.len()].into_boxed_slice(), - config, - }; regions.sort(); - for index in 1..regions.len() { - let first = ®ions[index - 1]; - let second = ®ions[index]; - if first.vm_addr.saturating_add(first.len) > second.vm_addr { - return Err(EbpfError::VirtualAddressOverlap(second.vm_addr)); + for (index, region) in regions.iter().enumerate() { + if region.vm_addr != (index as u64) << ebpf::VIRTUAL_ADDRESS_BITS + || (region.len > 0 + && ((region.vm_addr + region.len - 1) >> ebpf::VIRTUAL_ADDRESS_BITS) as usize + != index) + { + return Err(EbpfError::InvalidMemoryRegion(index)); } } - result.construct_eytzinger_order(®ions, 0, 0); - Ok(result) + Ok(Self { + regions: regions.into_boxed_slice(), + config, + }) } /// Given a list of regions translate from virtual machine to host address @@ -147,18 +129,13 @@ impl<'a> MemoryMapping<'a> { vm_addr: u64, len: u64, ) -> Result> { - let mut index = 1; - while index <= self.dense_keys.len() { - index = (index << 1) + (self.dense_keys[index - 1] <= vm_addr) as usize; - } - index >>= index.trailing_zeros() + 1; - if index == 0 { - return self.generate_access_violation(access_type, vm_addr, len); - } - let region = &self.regions[index - 1]; - if access_type == AccessType::Load || region.is_writable { - if let Ok(host_addr) = region.vm_to_host::(vm_addr, len as u64) { - return Ok(host_addr); + let index = (vm_addr >> ebpf::VIRTUAL_ADDRESS_BITS) as usize; + if (1..self.regions.len()).contains(&index) { + let region = &self.regions[index]; + if access_type == AccessType::Load || region.is_writable { + if let Ok(host_addr) = region.vm_to_host::(vm_addr, len as u64) { + return Ok(host_addr); + } } } self.generate_access_violation(access_type, vm_addr, len) @@ -205,12 +182,13 @@ impl<'a> MemoryMapping<'a> { index: usize, new_len: u64, ) -> Result<(), EbpfError> { - if index < self.regions.len() - 1 - && self.regions[index].vm_addr.saturating_add(new_len) > self.regions[index + 1].vm_addr + if index >= self.regions.len() + || (new_len > 0 + && ((self.regions[index].vm_addr + new_len - 1) >> ebpf::VIRTUAL_ADDRESS_BITS) + as usize + != index) { - return Err(EbpfError::VirtualAddressOverlap( - self.regions[index + 1].vm_addr, - )); + return Err(EbpfError::InvalidMemoryRegion(index)); } self.regions[index].len = new_len; Ok(()) diff --git a/src/static_analysis.rs b/src/static_analysis.rs index 4b03b713..f16e2857 100644 --- a/src/static_analysis.rs +++ b/src/static_analysis.rs @@ -113,7 +113,7 @@ pub struct Analysis<'a, E: UserDefinedError, I: InstructionMeter> { impl<'a, E: UserDefinedError, I: InstructionMeter> Analysis<'a, E, I> { /// Analyze an executable statically pub fn from_executable(executable: &'a dyn Executable) -> Self { - let (_program_vm_addr, program) = executable.get_text_bytes().unwrap(); + let 
(_program_vm_addr, program) = executable.get_text_bytes(); let functions = executable.get_function_symbols(); debug_assert!( program.len() % ebpf::INSN_SIZE == 0, diff --git a/src/syscalls.rs b/src/syscalls.rs index bdd69353..dc28e083 100644 --- a/src/syscalls.rs +++ b/src/syscalls.rs @@ -55,7 +55,7 @@ pub const BPF_KTIME_GETNS_IDX: u32 = 5; /// /// let mut result: Result = Ok(0); /// let config = Config::default(); -/// let memory_mapping = MemoryMapping::new::(vec![MemoryRegion::default()], &config).unwrap(); +/// let memory_mapping = MemoryMapping::new::(vec![], &config).unwrap(); /// BpfTimeGetNs::call(&mut BpfTimeGetNs {}, 0, 0, 0, 0, 0, &memory_mapping, &mut result); /// let t = result.unwrap(); /// let d = t / 10u64.pow(9) / 60 / 60 / 24; @@ -103,7 +103,7 @@ pub const BPF_TRACE_PRINTK_IDX: u32 = 6; /// /// let mut result: Result = Ok(0); /// let config = Config::default(); -/// let memory_mapping = MemoryMapping::new::(vec![MemoryRegion::default()], &config).unwrap(); +/// let memory_mapping = MemoryMapping::new::(vec![], &config).unwrap(); /// BpfTracePrintf::call(&mut BpfTracePrintf {}, 0, 0, 1, 15, 32, &memory_mapping, &mut result); /// assert_eq!(result.unwrap() as usize, "BpfTracePrintf: 0x1, 0xf, 0x20\n".len()); /// ``` @@ -172,7 +172,7 @@ impl SyscallObject for BpfTracePrintf { /// /// let mut result: Result = Ok(0); /// let config = Config::default(); -/// let memory_mapping = MemoryMapping::new::(vec![MemoryRegion::default()], &config).unwrap(); +/// let memory_mapping = MemoryMapping::new::(vec![], &config).unwrap(); /// BpfGatherBytes::call(&mut BpfGatherBytes {}, 0x11, 0x22, 0x33, 0x44, 0x55, &memory_mapping, &mut result); /// assert_eq!(result.unwrap(), 0x1122334455); /// ``` @@ -211,11 +211,11 @@ impl SyscallObject for BpfGatherBytes { /// use solana_rbpf::user_error::UserError; /// /// let val = vec![0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x22, 0x33]; -/// let val_va = 0x1000; +/// let val_va = 0x100000000; /// /// let mut result: Result = Ok(0); /// let config = Config::default(); -/// let memory_mapping = MemoryMapping::new::(vec![MemoryRegion::new_from_slice(&val, val_va, 0, true)], &config).unwrap(); +/// let memory_mapping = MemoryMapping::new::(vec![MemoryRegion::default(), MemoryRegion::new_from_slice(&val, val_va, 0, true)], &config).unwrap(); /// BpfMemFrob::call(&mut BpfMemFrob {}, val_va, 8, 0, 0, 0, &memory_mapping, &mut result); /// assert_eq!(val, vec![0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x3b, 0x08, 0x19]); /// BpfMemFrob::call(&mut BpfMemFrob {}, val_va, 8, 0, 0, 0, &memory_mapping, &mut result); @@ -257,7 +257,7 @@ impl SyscallObject for BpfMemFrob { /// /// let mut result: Result = Ok(0); /// let config = Config::default(); -/// let memory_mapping = MemoryMapping::new::(vec![MemoryRegion::default()], &config).unwrap(); +/// let memory_mapping = MemoryMapping::new::(vec![], &config).unwrap(); /// BpfSqrtI::call(&mut BpfSqrtI {}, 9, 0, 0, 0, 0, &memory_mapping, &mut result); /// assert_eq!(result.unwrap(), 3); /// ``` @@ -288,17 +288,17 @@ impl SyscallObject for BpfSqrtI { /// /// let foo = "This is a string."; /// let bar = "This is another sting."; -/// let va_foo = 0x1000; -/// let va_bar = 0x2000; +/// let va_foo = 0x100000000; +/// let va_bar = 0x200000000; /// use solana_rbpf::user_error::UserError; /// /// let mut result: Result = Ok(0); /// let config = Config::default(); -/// let memory_mapping = MemoryMapping::new::(vec![MemoryRegion::new_from_slice(foo.as_bytes(), va_foo, 0, false)], &config).unwrap(); +/// let memory_mapping = 
MemoryMapping::new::(vec![MemoryRegion::default(), MemoryRegion::new_from_slice(foo.as_bytes(), va_foo, 0, false)], &config).unwrap(); /// BpfStrCmp::call(&mut BpfStrCmp {}, va_foo, va_foo, 0, 0, 0, &memory_mapping, &mut result); /// assert!(result.unwrap() == 0); /// let mut result: Result = Ok(0); -/// let memory_mapping = MemoryMapping::new::(vec![MemoryRegion::new_from_slice(foo.as_bytes(), va_foo, 0, false), MemoryRegion::new_from_slice(bar.as_bytes(), va_bar, 0, false)], &config).unwrap(); +/// let memory_mapping = MemoryMapping::new::(vec![MemoryRegion::default(), MemoryRegion::new_from_slice(foo.as_bytes(), va_foo, 0, false), MemoryRegion::new_from_slice(bar.as_bytes(), va_bar, 0, false)], &config).unwrap(); /// BpfStrCmp::call(&mut BpfStrCmp {}, va_foo, va_bar, 0, 0, 0, &memory_mapping, &mut result); /// assert!(result.unwrap() != 0); /// ``` @@ -365,7 +365,7 @@ impl SyscallObject for BpfStrCmp { /// /// let mut result: Result = Ok(0); /// let config = Config::default(); -/// let memory_mapping = MemoryMapping::new::(vec![MemoryRegion::default()], &config).unwrap(); +/// let memory_mapping = MemoryMapping::new::(vec![], &config).unwrap(); /// BpfRand::call(&mut BpfRand {}, 3, 6, 0, 0, 0, &memory_mapping, &mut result); /// let n = result.unwrap(); /// assert!(3 <= n && n <= 6); diff --git a/src/vm.rs b/src/vm.rs index 50f4c731..4143755f 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -179,6 +179,8 @@ pub struct Config { pub max_call_depth: usize, /// Size of a stack frame in bytes, must match the size specified in the LLVM BPF backend pub stack_frame_size: usize, + /// Enables gaps in VM address space between the stack frames + pub enable_stack_frame_gaps: bool, /// Maximal pc distance after which a new instruction meter validation is emitted by the JIT pub instruction_meter_checkpoint_distance: usize, /// Enable instruction meter and limiting @@ -201,6 +203,7 @@ impl Default for Config { Self { max_call_depth: 20, stack_frame_size: 4_096, + enable_stack_frame_gaps: true, instruction_meter_checkpoint_distance: 10000, enable_instruction_meter: true, enable_instruction_tracing: false, @@ -218,9 +221,9 @@ pub trait Executable: Send + Sync { /// Get the configuration settings fn get_config(&self) -> &Config; /// Get the .text section virtual address and bytes - fn get_text_bytes(&self) -> Result<(u64, &[u8]), EbpfError>; - /// Get a vector of virtual addresses for each read-only section - fn get_ro_sections(&self) -> Result, EbpfError>; + fn get_text_bytes(&self) -> (u64, &[u8]); + /// Get the concatenated read-only sections (including the text section) + fn get_ro_section(&self) -> &[u8]; /// Get the entry point offset into the text section fn get_entrypoint_instruction_offset(&self) -> Result>; /// Get a symbol's instruction offset @@ -243,7 +246,7 @@ pub trait Executable: Send + Sync { pub const REPORT_UNRESOLVED_SYMBOL_INDEX: usize = 8; /// The syscall_context_objects field stores some metadata in the front, thus the entries are shifted -pub const SYSCALL_CONTEXT_OBJECTS_OFFSET: usize = 6; +pub const SYSCALL_CONTEXT_OBJECTS_OFFSET: usize = 4; /// Static constructors for Executable impl dyn Executable { @@ -255,7 +258,7 @@ impl dyn Executable { syscall_registry: SyscallRegistry, ) -> Result, EbpfError> { let ebpf_elf = EBpfElf::load(config, elf_bytes, syscall_registry)?; - let text_bytes = ebpf_elf.get_text_bytes()?.1; + let text_bytes = ebpf_elf.get_text_bytes().1; if let Some(verifier) = verifier { verifier(text_bytes, &config)?; } @@ -454,7 +457,7 @@ macro_rules! 
translate_memory_access { /// let mut bpf_functions = std::collections::BTreeMap::new(); /// register_bpf_function(&mut bpf_functions, 0, "entrypoint").unwrap(); /// let mut executable = >::from_text_bytes(prog, None, Config::default(), SyscallRegistry::default(), bpf_functions).unwrap(); -/// let mut vm = EbpfVm::::new(executable.as_ref(), mem, &[]).unwrap(); +/// let mut vm = EbpfVm::::new(executable.as_ref(), &mut [], mem).unwrap(); /// /// // Provide a reference to the packet data. /// let res = vm.execute_program_interpreted(&mut TestInstructionMeter { remaining: 1 }).unwrap(); @@ -468,7 +471,7 @@ pub struct EbpfVm<'a, E: UserDefinedError, I: InstructionMeter> { tracer: Tracer, syscall_context_objects: Vec<*mut u8>, syscall_context_object_pool: Vec + 'a>>, - frames: CallFrames, + stack: CallFrames<'a>, last_insn_count: u64, total_insn_count: u64, } @@ -490,42 +493,24 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, E, I> { /// let mut bpf_functions = std::collections::BTreeMap::new(); /// register_bpf_function(&mut bpf_functions, 0, "entrypoint").unwrap(); /// let mut executable = >::from_text_bytes(prog, None, Config::default(), SyscallRegistry::default(), bpf_functions).unwrap(); - /// let mut vm = EbpfVm::::new(executable.as_ref(), &mut [], &[]).unwrap(); + /// let mut vm = EbpfVm::::new(executable.as_ref(), &mut [], &mut []).unwrap(); /// ``` pub fn new( executable: &'a dyn Executable, - mem: &mut [u8], - granted_regions: &[MemoryRegion], + heap_region: &mut [u8], + input_region: &mut [u8], ) -> Result, EbpfError> { let config = executable.get_config(); - let const_data_regions: Vec = - if let Ok(sections) = executable.get_ro_sections() { - sections - .iter() - .map(|(addr, slice)| MemoryRegion::new_from_slice(slice, *addr, 0, false)) - .collect() - } else { - Vec::new() - }; - let mut regions: Vec = - Vec::with_capacity(granted_regions.len() + const_data_regions.len() + 3); - regions.extend(granted_regions.iter().cloned()); - let frames = CallFrames::new(config.max_call_depth, config.stack_frame_size); - regions.push(frames.get_region().clone()); - regions.extend(const_data_regions); - regions.push(MemoryRegion::new_from_slice( - mem, - ebpf::MM_INPUT_START, - 0, - true, - )); - let (program_vm_addr, program) = executable.get_text_bytes()?; - regions.push(MemoryRegion::new_from_slice( - program, - program_vm_addr, - 0, - false, - )); + let ro_region = executable.get_ro_section(); + let stack = CallFrames::new(config); + let regions: Vec = vec![ + MemoryRegion::new_from_slice(&[], 0, 0, false), + MemoryRegion::new_from_slice(ro_region, ebpf::MM_PROGRAM_START, 0, false), + stack.get_memory_region(), + MemoryRegion::new_from_slice(heap_region, ebpf::MM_HEAP_START, 0, true), + MemoryRegion::new_from_slice(input_region, ebpf::MM_INPUT_START, 0, true), + ]; + let (program_vm_addr, program) = executable.get_text_bytes(); let number_of_syscalls = executable.get_syscall_registry().get_number_of_syscalls(); let mut vm = EbpfVm { executable, @@ -538,7 +523,7 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, E, I> { SYSCALL_CONTEXT_OBJECTS_OFFSET + number_of_syscalls ], syscall_context_object_pool: Vec::with_capacity(number_of_syscalls), - frames, + stack, last_insn_count: 0, total_insn_count: 0, }; @@ -598,7 +583,7 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, E, I> { /// let mut bpf_functions = std::collections::BTreeMap::new(); /// register_bpf_function(&mut bpf_functions, 0, "entrypoint").unwrap(); /// let mut executable = 
>::from_text_bytes(prog, None, Config::default(), syscall_registry, bpf_functions).unwrap(); - /// let mut vm = EbpfVm::::new(executable.as_ref(), &mut [], &[]).unwrap(); + /// let mut vm = EbpfVm::::new(executable.as_ref(), &mut [], &mut []).unwrap(); /// // Bind a context object instance to the previously registered syscall /// vm.bind_syscall_context_object(Box::new(BpfTracePrintf {}), None); /// ``` @@ -662,7 +647,7 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, E, I> { /// let mut bpf_functions = std::collections::BTreeMap::new(); /// register_bpf_function(&mut bpf_functions, 0, "entrypoint").unwrap(); /// let mut executable = >::from_text_bytes(prog, None, Config::default(), SyscallRegistry::default(), bpf_functions).unwrap(); - /// let mut vm = EbpfVm::::new(executable.as_ref(), mem, &[]).unwrap(); + /// let mut vm = EbpfVm::::new(executable.as_ref(), &mut [], mem).unwrap(); /// /// // Provide a reference to the packet data. /// let res = vm.execute_program_interpreted(&mut TestInstructionMeter { remaining: 1 }).unwrap(); @@ -690,7 +675,7 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, E, I> { const U32MAX: u64 = u32::MAX as u64; // R1 points to beginning of input memory, R10 to the stack of the first frame - let mut reg: [u64; 11] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.frames.get_stack_top()]; + let mut reg: [u64; 11] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.stack.get_stack_top()]; if self.memory_mapping.map::(AccessType::Store, ebpf::MM_INPUT_START, 1).is_ok() { reg[1] = ebpf::MM_INPUT_START; @@ -962,7 +947,7 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, E, I> { ebpf::CALL_REG => { let target_address = reg[insn.imm as usize]; reg[ebpf::STACK_REG] = - self.frames.push(®[ebpf::FIRST_SCRATCH_REG..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS], next_pc)?; + self.stack.push(®[ebpf::FIRST_SCRATCH_REG..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS], next_pc)?; if target_address < self.program_vm_addr { return Err(EbpfError::CallOutsideTextSegment(pc + ebpf::ELF_INSN_DUMP_OFFSET, target_address / ebpf::INSN_SIZE as u64 * ebpf::INSN_SIZE as u64)); } @@ -995,7 +980,7 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, E, I> { } } else if let Some(target_pc) = self.executable.lookup_bpf_function(insn.imm as u32) { // make BPF to BPF call - reg[ebpf::STACK_REG] = self.frames.push( + reg[ebpf::STACK_REG] = self.stack.push( ®[ebpf::FIRST_SCRATCH_REG ..ebpf::FIRST_SCRATCH_REG + ebpf::SCRATCH_REGS], next_pc, @@ -1007,7 +992,7 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, E, I> { } ebpf::EXIT => { - match self.frames.pop::() { + match self.stack.pop::() { Ok((saved_reg, stack_ptr, ptr)) => { // Return from BPF to BPF call reg[ebpf::FIRST_SCRATCH_REG @@ -1020,7 +1005,7 @@ impl<'a, E: UserDefinedError, I: InstructionMeter> EbpfVm<'a, E, I> { debug!("BPF instructions executed (interp): {:?}", total_insn_count + self.last_insn_count); debug!( "Max frame depth reached: {:?}", - self.frames.get_max_frame_index() + self.stack.get_max_frame_index() ); return Ok(reg[0]); } diff --git a/tests/assembler.rs b/tests/assembler.rs index d6812744..d36afb53 100644 --- a/tests/assembler.rs +++ b/tests/assembler.rs @@ -22,7 +22,7 @@ fn asm(src: &str) -> Result, String> { Config::default(), SyscallRegistry::default(), )?; - let (_program_vm_addr, program) = executable.get_text_bytes().unwrap(); + let (_program_vm_addr, program) = executable.get_text_bytes(); Ok((0..program.len() / ebpf::INSN_SIZE) .map(|insn_ptr| ebpf::get_insn(program, 
insn_ptr)) .collect()) @@ -581,7 +581,7 @@ fn test_tcp_sack() { SyscallRegistry::default(), ) .unwrap(); - let (_program_vm_addr, program) = executable.get_text_bytes().unwrap(); + let (_program_vm_addr, program) = executable.get_text_bytes(); assert_eq!(program, TCP_SACK_BIN.to_vec()); } diff --git a/tests/misc.rs b/tests/misc.rs index fc4caa4a..ca6c001b 100644 --- a/tests/misc.rs +++ b/tests/misc.rs @@ -127,7 +127,7 @@ fn test_fuzz_execute() { let mut vm = EbpfVm::::new( executable.as_ref(), &mut [], - &[], + &mut [], ) .unwrap(); vm.bind_syscall_context_object(Box::new(BpfSyscallString {}), None) diff --git a/tests/ubpf_execution.rs b/tests/ubpf_execution.rs index d958e398..81462859 100644 --- a/tests/ubpf_execution.rs +++ b/tests/ubpf_execution.rs @@ -36,7 +36,7 @@ macro_rules! test_interpreter_and_jit { let check_closure = $check; let (instruction_count_interpreter, _tracer_interpreter) = { let mut mem = $mem; - let mut vm = EbpfVm::new($executable.as_ref(), &mut mem, &[]).unwrap(); + let mut vm = EbpfVm::new($executable.as_ref(), &mut [], &mut mem).unwrap(); $(test_interpreter_and_jit!(bind, vm, $location => $syscall_function; $syscall_context_object);)* let result = vm.execute_program_interpreted(&mut TestInstructionMeter { remaining: $expected_instruction_count }); assert!(check_closure(&vm, result)); @@ -47,7 +47,7 @@ macro_rules! test_interpreter_and_jit { let check_closure = $check; let compilation_result = $executable.jit_compile(); let mut mem = $mem; - let mut vm = EbpfVm::new($executable.as_ref(), &mut mem, &[]).unwrap(); + let mut vm = EbpfVm::new($executable.as_ref(), &mut [], &mut mem).unwrap(); match compilation_result { Err(err) => assert!(check_closure(&vm, Err(err))), Ok(()) => { @@ -2738,7 +2738,7 @@ impl SyscallObject for NestedVmSyscall { { executable.jit_compile().unwrap(); } - let mut vm = EbpfVm::new(executable.as_ref(), mem, &[]).unwrap(); + let mut vm = EbpfVm::new(executable.as_ref(), &mut [], mem).unwrap(); vm.bind_syscall_context_object(Box::new(NestedVmSyscall {}), None) .unwrap(); let mut instruction_meter = TestInstructionMeter { remaining: 6 }; @@ -3170,7 +3170,7 @@ fn test_err_unresolved_elf() { file.read_to_end(&mut elf).unwrap(); let config = Config { reject_unresolved_syscalls: true, - ..Default::default() + ..Config::default() }; assert!( matches!(>::from_elf(&elf, None, config, syscall_registry), Err(EbpfError::ElfError(ElfError::UnresolvedSymbol(symbol, pc, offset))) if symbol == "log_64" && pc == 550 && offset == 4168) @@ -3416,7 +3416,7 @@ fn execute_generated_program(prog: &[u8]) -> bool { } let (instruction_count_interpreter, tracer_interpreter, result_interpreter) = { let mut mem = vec![0u8; mem_size]; - let mut vm = EbpfVm::new(executable.as_ref(), &mut mem, &[]).unwrap(); + let mut vm = EbpfVm::new(executable.as_ref(), &mut [], &mut mem).unwrap(); let result_interpreter = vm.execute_program_interpreted(&mut TestInstructionMeter { remaining: max_instruction_count, }); @@ -3428,7 +3428,7 @@ fn execute_generated_program(prog: &[u8]) -> bool { ) }; let mut mem = vec![0u8; mem_size]; - let mut vm = EbpfVm::new(executable.as_ref(), &mut mem, &[]).unwrap(); + let mut vm = EbpfVm::new(executable.as_ref(), &mut [], &mut mem).unwrap(); let result_jit = vm.execute_program_jit(&mut TestInstructionMeter { remaining: max_instruction_count, }); diff --git a/tests/ubpf_verifier.rs b/tests/ubpf_verifier.rs index d0edd75c..c51e1b95 100644 --- a/tests/ubpf_verifier.rs +++ b/tests/ubpf_verifier.rs @@ -51,8 +51,8 @@ fn test_verifier_success() { 
SyscallRegistry::default(), ) .unwrap(); - let _vm = - EbpfVm::::new(executable.as_ref(), &mut [], &[]).unwrap(); + let _vm = EbpfVm::::new(executable.as_ref(), &mut [], &mut []) + .unwrap(); } #[test]
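
Note on the new translation scheme ("Makes address translation constant-time", "Adds an explicit empty NULL region"): the upper 32 bits of a virtual address now select the memory region and the lower 32 bits are the offset inside it, with index 0 left empty so that NULL pointer accesses fault. A minimal sketch of the lookup, where Region and map are simplified stand-ins for the crate's MemoryRegion and MemoryMapping::map (access length and writability checks omitted):

const VIRTUAL_ADDRESS_BITS: u32 = 32;

struct Region {
    host_addr: u64,
    len: u64,
}

// Constant-time lookup: no search over regions, just an index and a bounds check.
fn map(regions: &[Region], vm_addr: u64) -> Option<u64> {
    let index = (vm_addr >> VIRTUAL_ADDRESS_BITS) as usize;
    // Region 0 is the explicit empty NULL region, so valid indices start at 1.
    if index == 0 || index >= regions.len() {
        return None;
    }
    let offset = vm_addr & ((1u64 << VIRTUAL_ADDRESS_BITS) - 1);
    let region = &regions[index];
    // Single-byte access for illustration; the real map checks the full access size.
    if offset < region.len {
        Some(region.host_addr + offset)
    } else {
        None
    }
}

fn main() {
    let backing = [0u8; 16];
    let regions = [
        Region { host_addr: 0, len: 0 },                        // NULL region at index 0
        Region { host_addr: backing.as_ptr() as u64, len: 16 }, // e.g. MM_PROGRAM_START
    ];
    assert!(map(&regions, 0x100000008).is_some()); // region 1, offset 8
    assert!(map(&regions, 0x8).is_none());         // NULL region catches low addresses
}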
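
Note on the optional stack frame gaps: with gaps enabled, every other frame-sized slot of the stack region is a guard gap, and MemoryRegion::vm_to_host folds the gap bit out of the offset before adding host_addr. A sketch of just that offset computation, assuming a power-of-two frame size and the same vm_gap_shift encoding as new_from_slice (simplified, not the crate's API):

fn gapped_offset(begin_offset: u64, vm_gap_shift: u32) -> Option<u64> {
    // An address whose gap bit is set falls into a guard gap between frames.
    if ((begin_offset >> vm_gap_shift) & 1) == 1 {
        return None;
    }
    let gap_mask = (-1i64 << vm_gap_shift) as u64;
    // Drop the gap bit: halve the part above the frame, keep the offset inside it.
    Some((begin_offset & gap_mask) >> 1 | (begin_offset & !gap_mask))
}

fn main() {
    let frame_size: u64 = 4096;
    // vm_gap_shift as in MemoryRegion::new_from_slice: position of the frame-size bit.
    let vm_gap_shift = 63u32 - frame_size.leading_zeros(); // 12 for 4 KiB frames
    // Frame 1 starts at 2 * frame_size in vm space but frame_size into the allocation.
    assert_eq!(gapped_offset(2 * frame_size, vm_gap_shift), Some(frame_size));
    // An address in the gap right after frame 0 is rejected.
    assert_eq!(gapped_offset(frame_size, vm_gap_shift), None);
}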