diff --git a/src/debugger.rs b/src/debugger.rs
index 81cfbe0b..b33ae1a2 100644
--- a/src/debugger.rs
+++ b/src/debugger.rs
@@ -153,8 +153,13 @@ fn get_host_ptr<C: ContextObject>(
     interpreter: &mut Interpreter<C>,
     mut vm_addr: u64,
 ) -> Result<*mut u8, EbpfError> {
-    if vm_addr < ebpf::MM_PROGRAM_START {
-        vm_addr += ebpf::MM_PROGRAM_START;
+    if !interpreter
+        .executable
+        .get_sbpf_version()
+        .enable_lower_bytecode_vaddr()
+        && vm_addr < ebpf::MM_RODATA_START
+    {
+        vm_addr += ebpf::MM_RODATA_START;
     }
     match interpreter.vm.memory_mapping.map(
         AccessType::Load,
diff --git a/src/ebpf.rs b/src/ebpf.rs
index 9c443092..81d929ef 100644
--- a/src/ebpf.rs
+++ b/src/ebpf.rs
@@ -41,17 +41,18 @@ pub const HOST_ALIGN: usize = 16;
 /// Upper half of a pointer is the region index, lower half the virtual address inside that region.
 pub const VIRTUAL_ADDRESS_BITS: usize = 32;
 
-// Memory map regions virtual addresses need to be (1 << VIRTUAL_ADDRESS_BITS) bytes apart.
-// Also the region at index 0 should be skipped to catch NULL ptr accesses.
-
-/// Start of the program bits (text and ro segments) in the memory map
-pub const MM_PROGRAM_START: u64 = 0x100000000;
-/// Start of the stack in the memory map
-pub const MM_STACK_START: u64 = 0x200000000;
-/// Start of the heap in the memory map
-pub const MM_HEAP_START: u64 = 0x300000000;
-/// Start of the input buffers in the memory map
-pub const MM_INPUT_START: u64 = 0x400000000;
+/// Size (and alignment) of a memory region
+pub const MM_REGION_SIZE: u64 = 1 << VIRTUAL_ADDRESS_BITS;
+/// Virtual address of the bytecode region (not available in SBPFv1)
+pub const MM_BYTECODE_START: u64 = 0;
+/// Virtual address of the readonly data region (also contains the bytecode in SBPFv1)
+pub const MM_RODATA_START: u64 = MM_REGION_SIZE;
+/// Virtual address of the stack region
+pub const MM_STACK_START: u64 = MM_REGION_SIZE * 2;
+/// Virtual address of the heap region
+pub const MM_HEAP_START: u64 = MM_REGION_SIZE * 3;
+/// Virtual address of the input region
+pub const MM_INPUT_START: u64 = MM_REGION_SIZE * 4;
 
 // eBPF op codes.
 // See also https://www.kernel.org/doc/Documentation/networking/filter.txt
diff --git a/src/elf.rs b/src/elf.rs
index f907705c..287f53e7 100644
--- a/src/elf.rs
+++ b/src/elf.rs
@@ -217,14 +217,14 @@ impl BpfRelocationType {
 pub(crate) enum Section {
     /// Owned section data.
     ///
-    /// The first field is the offset of the section from MM_PROGRAM_START. The
-    /// second field is the actual section data.
+    /// The first field is the virtual address of the section.
+    /// The second field is the actual section data.
     Owned(usize, Vec<u8>),
     /// Borrowed section data.
    ///
-    /// The first field is the offset of the section from MM_PROGRAM_START. The
-    /// second field an be used to index the input ELF buffer to retrieve the
-    /// section data.
+    /// The first field is the virtual address of the section.
+    /// The second field can be used to index the input ELF buffer to
+    /// retrieve the section data.
     Borrowed(usize, Range<usize>),
 }
 
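Note for reviewers: under the new constants, region membership is a pure bit operation, since the upper 32 bits of a pointer select the region and the lower 32 bits the offset inside it. A standalone sketch of the layout (constants mirrored from src/ebpf.rs; the `region_index` helper is illustrative, not part of the patch):

```rust
// Mirrors the constants introduced in src/ebpf.rs above.
const VIRTUAL_ADDRESS_BITS: usize = 32;
const MM_REGION_SIZE: u64 = 1 << VIRTUAL_ADDRESS_BITS;
const MM_BYTECODE_START: u64 = 0;
const MM_RODATA_START: u64 = MM_REGION_SIZE;
const MM_STACK_START: u64 = MM_REGION_SIZE * 2;
const MM_HEAP_START: u64 = MM_REGION_SIZE * 3;
const MM_INPUT_START: u64 = MM_REGION_SIZE * 4;

// Hypothetical helper: the upper half of a pointer is the region index.
fn region_index(vm_addr: u64) -> u64 {
    vm_addr >> VIRTUAL_ADDRESS_BITS
}

fn main() {
    // Region 0 now holds the bytecode (SBPFv2+); rodata moved to region 1,
    // which is why the debugger fixup above is gated on
    // enable_lower_bytecode_vaddr().
    assert_eq!(region_index(MM_BYTECODE_START), 0);
    assert_eq!(region_index(MM_RODATA_START), 1);
    assert_eq!(region_index(MM_STACK_START + 0x10), 2);
    assert_eq!(region_index(MM_HEAP_START), 3);
    assert_eq!(region_index(MM_INPUT_START), 4);
    // The offset within a region is the low 32 bits.
    assert_eq!((MM_RODATA_START + 0x120) & (MM_REGION_SIZE - 1), 0x120);
}
```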
@@ -355,8 +355,12 @@ impl Executable {
         Ok(Self {
             elf_bytes,
             sbpf_version,
-            ro_section: Section::Borrowed(ebpf::MM_PROGRAM_START as usize, 0..text_bytes.len()),
-            text_section_vaddr: ebpf::MM_PROGRAM_START,
+            ro_section: Section::Borrowed(ebpf::MM_RODATA_START as usize, 0..text_bytes.len()),
+            text_section_vaddr: if sbpf_version.enable_lower_bytecode_vaddr() {
+                ebpf::MM_BYTECODE_START
+            } else {
+                ebpf::MM_RODATA_START
+            },
             text_section_range: 0..text_bytes.len(),
             entry_pc,
             function_registry,
@@ -400,10 +404,10 @@ impl Executable {
         // calculate the text section info
         let text_section = get_section(elf, b".text")?;
         let text_section_vaddr =
-            if sbpf_version.enable_elf_vaddr() && text_section.sh_addr >= ebpf::MM_PROGRAM_START {
+            if sbpf_version.enable_elf_vaddr() && text_section.sh_addr >= ebpf::MM_RODATA_START {
                 text_section.sh_addr
             } else {
-                text_section.sh_addr.saturating_add(ebpf::MM_PROGRAM_START)
+                text_section.sh_addr.saturating_add(ebpf::MM_RODATA_START)
             };
         let vaddr_end = if sbpf_version.reject_rodata_stack_overlap() {
             text_section_vaddr.saturating_add(text_section.sh_size)
@@ -640,7 +644,7 @@ impl Executable {
         // If sbpf_version.enable_elf_vaddr()=true, we allow section_addr >
         // sh_offset, if section_addr - sh_offset is constant across all
         // sections. That is, we allow the linker to align rodata to a
-        // positive base address (MM_PROGRAM_START) as long as the mapping
+        // positive base address (MM_RODATA_START) as long as the mapping
         // to sh_offset(s) stays linear.
         //
         // If sbpf_version.enable_elf_vaddr()=false, section_addr must match
@@ -667,10 +671,10 @@ impl Executable {
         }
 
         let mut vaddr_end =
-            if sbpf_version.enable_elf_vaddr() && section_addr >= ebpf::MM_PROGRAM_START {
+            if sbpf_version.enable_elf_vaddr() && section_addr >= ebpf::MM_RODATA_START {
                 section_addr
             } else {
-                section_addr.saturating_add(ebpf::MM_PROGRAM_START)
+                section_addr.saturating_add(ebpf::MM_RODATA_START)
             };
         if sbpf_version.reject_rodata_stack_overlap() {
             vaddr_end = vaddr_end.saturating_add(section_header.sh_size);
@@ -715,17 +719,17 @@ impl Executable {
             let buf_offset_end =
                 highest_addr.saturating_sub(addr_file_offset.unwrap_or(0) as usize);
 
-            let addr_offset = if lowest_addr >= ebpf::MM_PROGRAM_START as usize {
+            let addr_offset = if lowest_addr >= ebpf::MM_RODATA_START as usize {
                 // The first field of Section::Borrowed is an offset from
-                // ebpf::MM_PROGRAM_START so if the linker has already put the
-                // sections within ebpf::MM_PROGRAM_START, we need to subtract
+                // ebpf::MM_RODATA_START so if the linker has already put the
+                // sections within ebpf::MM_RODATA_START, we need to subtract
                 // it now.
                 lowest_addr
             } else {
                 if sbpf_version.enable_elf_vaddr() {
                     return Err(ElfError::ValueOutOfBounds);
                 }
-                lowest_addr.saturating_add(ebpf::MM_PROGRAM_START as usize)
+                lowest_addr.saturating_add(ebpf::MM_RODATA_START as usize)
             };
 
             Section::Borrowed(addr_offset, buf_offset_start..buf_offset_end)
@@ -734,14 +738,14 @@ impl Executable {
 
             // sections and and copy the ro ones at their intended offsets.
             if config.optimize_rodata {
-                // The rodata region starts at MM_PROGRAM_START + offset,
-                // [MM_PROGRAM_START, MM_PROGRAM_START + offset) is not
+                // The rodata region starts at MM_RODATA_START + offset,
+                // [MM_RODATA_START, MM_RODATA_START + offset) is not
                 // mappable. We only need to allocate highest_addr - lowest_addr
                 // bytes.
                 highest_addr = highest_addr.saturating_sub(lowest_addr);
             } else {
-                // For backwards compatibility, the whole [MM_PROGRAM_START,
-                // MM_PROGRAM_START + highest_addr) range is mappable. We need
+                // For backwards compatibility, the whole [MM_RODATA_START,
+                // MM_RODATA_START + highest_addr) range is mappable. We need
                 // to allocate the whole address range.
                 lowest_addr = 0;
             };
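The optimize_rodata comments above boil down to two sizing strategies for the owned buffer. A hedged sketch; `ro_buffer_layout` is a hypothetical helper for illustration, not an rbpf API:

```rust
// Returns (base offset within the rodata region, buffer length) for the two
// strategies described in the comments above. Assumption: addresses are
// already region-relative, as in parse_ro_sections.
fn ro_buffer_layout(lowest_addr: usize, highest_addr: usize, optimize_rodata: bool) -> (usize, usize) {
    if optimize_rodata {
        // Only [lowest_addr, highest_addr) is backed; the gap below stays unmappable.
        (lowest_addr, highest_addr.saturating_sub(lowest_addr))
    } else {
        // Legacy behavior: the whole [0, highest_addr) range is backed and mappable.
        (0, highest_addr)
    }
}

fn main() {
    assert_eq!(ro_buffer_layout(10, 30, true), (10, 20));
    assert_eq!(ro_buffer_layout(10, 30, false), (0, 30));
}
```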
@@ -758,10 +762,10 @@ impl Executable {
                     .copy_from_slice(slice);
             }
 
-            let addr_offset = if lowest_addr >= ebpf::MM_PROGRAM_START as usize {
+            let addr_offset = if lowest_addr >= ebpf::MM_RODATA_START as usize {
                 lowest_addr
             } else {
-                lowest_addr.saturating_add(ebpf::MM_PROGRAM_START as usize)
+                lowest_addr.saturating_add(ebpf::MM_RODATA_START as usize)
             };
             Section::Owned(addr_offset, ro_section)
         };
@@ -880,11 +884,11 @@ impl Executable {
                     let mut addr = symbol.st_value.saturating_add(refd_addr);
 
                     // The "physical address" from the VM's perspective is rooted
-                    // at `MM_PROGRAM_START`. If the linker hasn't already put
-                    // the symbol within `MM_PROGRAM_START`, we need to do so
+                    // at `MM_RODATA_START`. If the linker hasn't already put
+                    // the symbol within `MM_RODATA_START`, we need to do so
                     // now.
-                    if addr < ebpf::MM_PROGRAM_START {
-                        addr = ebpf::MM_PROGRAM_START.saturating_add(addr);
+                    if addr < ebpf::MM_RODATA_START {
+                        addr = ebpf::MM_RODATA_START.saturating_add(addr);
                     }
 
                     if text_section
@@ -970,10 +974,10 @@ impl Executable {
                         return Err(ElfError::InvalidVirtualAddress(refd_addr));
                     }
 
-                    if refd_addr < ebpf::MM_PROGRAM_START {
+                    if refd_addr < ebpf::MM_RODATA_START {
                         // The linker hasn't already placed rodata within
-                        // MM_PROGRAM_START, so we do so now
-                        refd_addr = ebpf::MM_PROGRAM_START.saturating_add(refd_addr);
+                        // MM_RODATA_START, so we do so now
+                        refd_addr = ebpf::MM_RODATA_START.saturating_add(refd_addr);
                     }
 
                     // Write back the low half
@@ -1005,9 +1009,9 @@ impl Executable {
                         .get(r_offset..r_offset.saturating_add(mem::size_of::<u64>()))
                         .ok_or(ElfError::ValueOutOfBounds)?;
                     let mut refd_addr = LittleEndian::read_u64(addr_slice);
-                    if refd_addr < ebpf::MM_PROGRAM_START {
-                        // Not within MM_PROGRAM_START, do it now
-                        refd_addr = ebpf::MM_PROGRAM_START.saturating_add(refd_addr);
+                    if refd_addr < ebpf::MM_RODATA_START {
+                        // Not within MM_RODATA_START, do it now
+                        refd_addr = ebpf::MM_RODATA_START.saturating_add(refd_addr);
                     }
                     refd_addr
                 } else {
@@ -1020,7 +1024,7 @@ impl Executable {
                         .get(imm_offset..imm_offset.saturating_add(BYTE_LENGTH_IMMEDIATE))
                         .ok_or(ElfError::ValueOutOfBounds)?;
                     let refd_addr = LittleEndian::read_u32(addr_slice) as u64;
-                    ebpf::MM_PROGRAM_START.saturating_add(refd_addr)
+                    ebpf::MM_RODATA_START.saturating_add(refd_addr)
                 };
 
                 let addr_slice = elf_bytes
@@ -1135,8 +1139,8 @@ pub(crate) fn get_ro_region(ro_section: &Section, elf: &[u8]) -> MemoryRegion {
         Section::Borrowed(offset, byte_range) => (*offset, &elf[byte_range.clone()]),
     };
 
-    // If offset > 0, the region will start at MM_PROGRAM_START + the offset of
-    // the first read only byte. [MM_PROGRAM_START, MM_PROGRAM_START + offset)
+    // If offset > 0, the region will start at MM_RODATA_START + the offset of
+    // the first read only byte. [MM_RODATA_START, MM_RODATA_START + offset)
     // will be unmappable, see MemoryRegion::vm_to_host.
     MemoryRegion::new_readonly(ro_data, offset as u64)
 }
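The get_ro_region comment describes a region whose first mapped byte sits above MM_RODATA_START. A minimal standalone sketch of that bounds check, assuming a single contiguous region; this is not the real MemoryRegion::vm_to_host signature:

```rust
// vm_addr_start: virtual address of the first mapped byte (e.g.
// MM_RODATA_START + offset); everything below it, including the
// [MM_RODATA_START, vm_addr_start) hole, is unmappable.
fn vm_to_host(vm_addr_start: u64, host_base: u64, len: u64, vm_addr: u64, size: u64) -> Option<u64> {
    let end = vm_addr.checked_add(size)?;
    if vm_addr >= vm_addr_start && end <= vm_addr_start.saturating_add(len) {
        Some(host_base.wrapping_add(vm_addr - vm_addr_start))
    } else {
        None // reported as InvalidVirtualAddress(vm_addr) in the real code
    }
}

fn main() {
    const MM_RODATA_START: u64 = 1 << 32; // mirrors src/ebpf.rs
    let start = MM_RODATA_START + 10; // first ro byte at sh_addr = 10
    assert!(vm_to_host(start, 0x1000, 20, MM_RODATA_START, 1).is_none()); // in the hole
    assert!(vm_to_host(start, 0x1000, 20, start, 20).is_some()); // whole window maps
    assert!(vm_to_host(start, 0x1000, 20, start + 20, 1).is_none()); // one byte past the end
}
```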
@@ -1381,7 +1385,7 @@ mod test {
         Elf64Shdr {
             sh_addr,
             sh_offset: sh_addr
-                .checked_sub(ebpf::MM_PROGRAM_START)
+                .checked_sub(ebpf::MM_RODATA_START)
                 .unwrap_or(sh_addr),
             sh_size,
             sh_name: 0,
@@ -1416,7 +1420,7 @@ mod test {
                 sections,
                 &elf_bytes,
             ),
-            Ok(Section::Owned(offset, data)) if offset == ebpf::MM_PROGRAM_START as usize + 10 && data.len() == 30
+            Ok(Section::Owned(offset, data)) if offset == ebpf::MM_RODATA_START as usize + 10 && data.len() == 30
         ));
     }
 
@@ -1443,7 +1447,7 @@ mod test {
                 sections,
                 &elf_bytes,
             ),
-            Ok(Section::Owned(offset, data)) if offset == ebpf::MM_PROGRAM_START as usize + 10 && data.len() == 20
+            Ok(Section::Owned(offset, data)) if offset == ebpf::MM_RODATA_START as usize + 10 && data.len() == 20
         ));
     }
 
@@ -1506,8 +1510,8 @@ mod test {
         };
         let elf_bytes = [0u8; 512];
 
-        let mut s1 = new_section(ebpf::MM_PROGRAM_START + 10, 10);
-        let mut s2 = new_section(ebpf::MM_PROGRAM_START + 20, 10);
+        let mut s1 = new_section(ebpf::MM_RODATA_START + 10, 10);
+        let mut s2 = new_section(ebpf::MM_RODATA_START + 20, 10);
         // The sections don't have a constant offset. This is rejected since it
         // makes it impossible to efficiently map virtual addresses to byte
         // offsets
@@ -1530,8 +1534,8 @@ mod test {
         };
         let elf_bytes = [0u8; 512];
 
-        let mut s1 = new_section(ebpf::MM_PROGRAM_START + 10, 10);
-        let mut s2 = new_section(ebpf::MM_PROGRAM_START + 20, 10);
+        let mut s1 = new_section(ebpf::MM_RODATA_START + 10, 10);
+        let mut s2 = new_section(ebpf::MM_RODATA_START + 20, 10);
         // the sections have a constant offset (100)
         s1.sh_offset = 100;
         s2.sh_offset = 110;
@@ -1541,7 +1545,7 @@ mod test {
         assert_eq!(
             ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes),
             Ok(Section::Borrowed(
-                ebpf::MM_PROGRAM_START as usize + 10,
+                ebpf::MM_RODATA_START as usize + 10,
                 100..120
             ))
         );
@@ -1573,15 +1577,15 @@ mod test {
 
         // [0..s3.sh_addr + s3.sh_size] is the valid ro memory area
         assert!(matches!(
-            ro_region.vm_to_host(ebpf::MM_PROGRAM_START, s3.sh_addr + s3.sh_size),
+            ro_region.vm_to_host(ebpf::MM_RODATA_START, s3.sh_addr + s3.sh_size),
             ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64,
         ));
 
         // one byte past the ro section is not mappable
         assert_error!(
-            ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size, 1),
+            ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size, 1),
             "InvalidVirtualAddress({})",
-            ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size
+            ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size
         );
     }
 
@@ -1617,15 +1621,15 @@ mod test {
         // But for backwards compatibility (config.optimize_rodata=false)
         // [0..s1.sh_addr] is mappable too (and zeroed).
         assert!(matches!(
-            ro_region.vm_to_host(ebpf::MM_PROGRAM_START, s3.sh_addr + s3.sh_size),
+            ro_region.vm_to_host(ebpf::MM_RODATA_START, s3.sh_addr + s3.sh_size),
             ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64,
         ));
 
         // one byte past the ro section is not mappable
         assert_error!(
-            ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size, 1),
+            ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size, 1),
             "InvalidVirtualAddress({})",
-            ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size
+            ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size
         );
     }
 
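The constant-offset tests above encode the zero-copy invariant: Section::Borrowed is only possible when sh_addr - sh_offset is identical for every ro section, so virtual addresses map linearly onto file offsets. A sketch with a hypothetical helper (not an rbpf API):

```rust
// Each entry is (sh_addr, sh_offset); returns the common delta if the
// mapping is linear, None if zero-copy must be rejected.
fn common_vaddr_delta(sections: &[(u64, u64)]) -> Option<u64> {
    let (a0, o0) = *sections.first()?;
    let delta = a0.checked_sub(o0)?;
    sections
        .iter()
        .all(|&(a, o)| a.checked_sub(o) == Some(delta))
        .then_some(delta)
}

fn main() {
    const MM_RODATA_START: u64 = 1 << 32; // mirrors src/ebpf.rs
    // Constant offset (like s1.sh_offset = 100, s2.sh_offset = 110): accepted.
    assert!(common_vaddr_delta(&[(MM_RODATA_START + 10, 100), (MM_RODATA_START + 20, 110)]).is_some());
    // Non-constant offset: rejected, no efficient vaddr-to-byte-offset mapping.
    assert!(common_vaddr_delta(&[(MM_RODATA_START + 10, 100), (MM_RODATA_START + 20, 30)]).is_none());
}
```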
@@ -1653,26 +1657,26 @@ mod test {
         };
         let ro_region = get_ro_region(&ro_section, &elf_bytes);
 
-        // s1 starts at sh_addr=10 so [MM_PROGRAM_START..MM_PROGRAM_START + 10] is not mappable
+        // s1 starts at sh_addr=10 so [MM_RODATA_START..MM_RODATA_START + 10] is not mappable
 
         // the low bound of the initial gap is not mappable
         assert_error!(
-            ro_region.vm_to_host(ebpf::MM_PROGRAM_START, 1),
+            ro_region.vm_to_host(ebpf::MM_RODATA_START, 1),
             "InvalidVirtualAddress({})",
-            ebpf::MM_PROGRAM_START
+            ebpf::MM_RODATA_START
         );
 
         // the hi bound of the initial gap is not mappable
         assert_error!(
-            ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s1.sh_addr - 1, 1),
+            ro_region.vm_to_host(ebpf::MM_RODATA_START + s1.sh_addr - 1, 1),
             "InvalidVirtualAddress({})",
-            ebpf::MM_PROGRAM_START + 9
+            ebpf::MM_RODATA_START + 9
         );
 
         // [s1.sh_addr..s3.sh_addr + s3.sh_size] is the valid ro memory area
         assert!(matches!(
             ro_region.vm_to_host(
-                ebpf::MM_PROGRAM_START + s1.sh_addr,
+                ebpf::MM_RODATA_START + s1.sh_addr,
                 s3.sh_addr + s3.sh_size - s1.sh_addr
             ),
             ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64,
@@ -1680,9 +1684,9 @@ mod test {
 
         // one byte past the ro section is not mappable
         assert_error!(
-            ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size, 1),
+            ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size, 1),
             "InvalidVirtualAddress({})",
-            ebpf::MM_PROGRAM_START + s3.sh_addr + s3.sh_size
+            ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size
         );
     }
 
@@ -1708,7 +1712,7 @@ mod test {
                 sections,
                 &elf_bytes,
             ),
-            Ok(Section::Owned(offset, data)) if offset == ebpf::MM_PROGRAM_START as usize && data.len() == 20
+            Ok(Section::Owned(offset, data)) if offset == ebpf::MM_RODATA_START as usize && data.len() == 20
         ));
     }
 
@@ -1718,7 +1722,7 @@ mod test {
         let elf_bytes = [0u8; 512];
         for (vaddr_base, sbpf_version) in [
             (0, SBPFVersion::V1),
-            (ebpf::MM_PROGRAM_START, SBPFVersion::V2),
+            (ebpf::MM_RODATA_START, SBPFVersion::V2),
         ] {
             let s1 = new_section(vaddr_base, 10);
             let s2 = new_section(vaddr_base + 20, 10);
@@ -1733,7 +1737,7 @@ mod test {
             assert_eq!(
                 ElfExecutable::parse_ro_sections(&config, &sbpf_version, sections, &elf_bytes),
                 Ok(Section::Borrowed(
-                    ebpf::MM_PROGRAM_START as usize + 20,
+                    ebpf::MM_RODATA_START as usize + 20,
                     20..50
                 ))
             );
@@ -1746,7 +1750,7 @@ mod test {
         let elf_bytes = [0u8; 512];
         for (vaddr_base, sbpf_version) in [
             (0, SBPFVersion::V1),
-            (ebpf::MM_PROGRAM_START, SBPFVersion::V2),
+            (ebpf::MM_RODATA_START, SBPFVersion::V2),
         ] {
             let s1 = new_section(vaddr_base, 10);
             let s2 = new_section(vaddr_base + 10, 10);
@@ -1764,15 +1768,15 @@ mod test {
             // s1 starts at sh_offset=0 so [0..s2.sh_offset + s2.sh_size]
             // is the valid ro memory area
             assert!(matches!(
-                ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s1.sh_offset, s2.sh_offset + s2.sh_size),
+                ro_region.vm_to_host(ebpf::MM_RODATA_START + s1.sh_offset, s2.sh_offset + s2.sh_size),
                 ProgramResult::Ok(ptr) if ptr == elf_bytes.as_ptr() as u64,
             ));
 
             // one byte past the ro section is not mappable
             assert_error!(
-                ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s3.sh_offset, 1),
+                ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_offset, 1),
                 "InvalidVirtualAddress({})",
-                ebpf::MM_PROGRAM_START + s3.sh_offset
+                ebpf::MM_RODATA_START + s3.sh_offset
             );
         }
     }
 
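The `(vaddr_base, sbpf_version)` loops above exercise both address spellings. A sketch of the equivalence being tested, as a simplified standalone assumption (not rbpf code): SBPFv1 ELFs carry region-relative section addresses that get rebased, while SBPFv2 ELFs already place sections inside the rodata region, and both must land on the same final virtual address.

```rust
const MM_RODATA_START: u64 = 1 << 32; // mirrors src/ebpf.rs

// Hypothetical helper: rebase only if the linker hasn't already placed the
// address inside the rodata region.
fn final_vaddr(sh_addr: u64) -> u64 {
    if sh_addr >= MM_RODATA_START {
        sh_addr // SBPFv2-style: already placed
    } else {
        MM_RODATA_START.saturating_add(sh_addr) // SBPFv1-style: rebase now
    }
}

fn main() {
    // vaddr_base = 0 (V1) and vaddr_base = MM_RODATA_START (V2) resolve alike.
    assert_eq!(final_vaddr(20), final_vaddr(MM_RODATA_START + 20));
}
```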
@@ -1783,7 +1787,7 @@ mod test {
         let elf_bytes = [0u8; 512];
         for (vaddr_base, sbpf_version) in [
             (0, SBPFVersion::V1),
-            (ebpf::MM_PROGRAM_START, SBPFVersion::V2),
+            (ebpf::MM_RODATA_START, SBPFVersion::V2),
         ] {
             let s1 = new_section(vaddr_base, 10);
             let s2 = new_section(vaddr_base + 10, 10);
@@ -1802,22 +1806,22 @@ mod test {
 
             // the low bound of the initial gap is not mappable
             assert_error!(
-                ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s1.sh_offset, 1),
+                ro_region.vm_to_host(ebpf::MM_RODATA_START + s1.sh_offset, 1),
                 "InvalidVirtualAddress({})",
-                ebpf::MM_PROGRAM_START + s1.sh_offset
+                ebpf::MM_RODATA_START + s1.sh_offset
             );
 
             // the hi bound of the initial gap is not mappable
             assert_error!(
-                ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s2.sh_offset - 1, 1),
+                ro_region.vm_to_host(ebpf::MM_RODATA_START + s2.sh_offset - 1, 1),
                 "InvalidVirtualAddress({})",
-                ebpf::MM_PROGRAM_START + s2.sh_offset - 1
+                ebpf::MM_RODATA_START + s2.sh_offset - 1
             );
 
             // [s2.sh_offset..s3.sh_offset + s3.sh_size] is the valid ro memory area
             assert!(matches!(
                 ro_region.vm_to_host(
-                    ebpf::MM_PROGRAM_START + s2.sh_offset,
+                    ebpf::MM_RODATA_START + s2.sh_offset,
                     s3.sh_offset + s3.sh_size - s2.sh_offset
                 ),
                 ProgramResult::Ok(ptr) if ptr == elf_bytes[s2.sh_offset as usize..].as_ptr() as u64,
@@ -1825,9 +1829,9 @@ mod test {
 
             // one byte past the ro section is not mappable
             assert_error!(
-                ro_region.vm_to_host(ebpf::MM_PROGRAM_START + s3.sh_offset + s3.sh_size, 1),
+                ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_offset + s3.sh_size, 1),
                 "InvalidVirtualAddress({})",
-                ebpf::MM_PROGRAM_START + s3.sh_offset + s3.sh_size
+                ebpf::MM_RODATA_START + s3.sh_offset + s3.sh_size
             );
         }
     }
diff --git a/src/memory_region.rs b/src/memory_region.rs
index 6b03216b..e555bf65 100644
--- a/src/memory_region.rs
+++ b/src/memory_region.rs
@@ -888,8 +888,8 @@ fn generate_access_violation(
             stack_frame,
         ))
     } else {
-        let region_name = match vm_addr & (!ebpf::MM_PROGRAM_START.saturating_sub(1)) {
-            ebpf::MM_PROGRAM_START => "program",
+        let region_name = match vm_addr & (!ebpf::MM_RODATA_START.saturating_sub(1)) {
+            ebpf::MM_RODATA_START => "program",
             ebpf::MM_STACK_START => "stack",
             ebpf::MM_HEAP_START => "heap",
             ebpf::MM_INPUT_START => "input",
@@ -1049,7 +1049,7 @@ mod test {
         let mut mem1 = vec![0xff; 8];
         let m = MemoryMapping::new(
             vec![
-                MemoryRegion::new_readonly(&[0; 8], ebpf::MM_PROGRAM_START),
+                MemoryRegion::new_readonly(&[0; 8], ebpf::MM_RODATA_START),
                 MemoryRegion::new_writable_gapped(&mut mem1, ebpf::MM_STACK_START, 2),
             ],
             &config,
@@ -1247,7 +1247,7 @@ mod test {
         let mem2 = vec![0xDD; 4];
         let m = MemoryMapping::new(
             vec![
-                MemoryRegion::new_writable(&mut mem1, ebpf::MM_PROGRAM_START),
+                MemoryRegion::new_writable(&mut mem1, ebpf::MM_RODATA_START),
                 MemoryRegion::new_readonly(&mem2, ebpf::MM_STACK_START),
             ],
             &config,
@@ -1255,25 +1255,25 @@ mod test {
         )
         .unwrap();
         assert_error!(
-            m.region(AccessType::Load, ebpf::MM_PROGRAM_START - 1),
+            m.region(AccessType::Load, ebpf::MM_RODATA_START - 1),
             "AccessViolation"
         );
         assert_eq!(
-            m.region(AccessType::Load, ebpf::MM_PROGRAM_START)
+            m.region(AccessType::Load, ebpf::MM_RODATA_START)
                 .unwrap()
                 .host_addr
                 .get(),
             mem1.as_ptr() as u64
         );
         assert_eq!(
-            m.region(AccessType::Load, ebpf::MM_PROGRAM_START + 3)
+            m.region(AccessType::Load, ebpf::MM_RODATA_START + 3)
                 .unwrap()
                 .host_addr
                 .get(),
             mem1.as_ptr() as u64
         );
         assert_error!(
-            m.region(AccessType::Load, ebpf::MM_PROGRAM_START + 4),
+            m.region(AccessType::Load, ebpf::MM_RODATA_START + 4),
             "AccessViolation"
         );
 
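The region-name match in generate_access_violation relies on every region base being a multiple of MM_REGION_SIZE, so masking off the low 32 bits of the faulting address yields the region base directly. Standalone sketch (constants mirror src/ebpf.rs):

```rust
const MM_RODATA_START: u64 = 1 << 32;
const MM_STACK_START: u64 = MM_RODATA_START * 2;
const MM_HEAP_START: u64 = MM_RODATA_START * 3;
const MM_INPUT_START: u64 = MM_RODATA_START * 4;

// Same masking trick as the diff above: clear the low 32 bits, then match
// the resulting region base against the known region starts.
fn region_name(vm_addr: u64) -> &'static str {
    match vm_addr & !MM_RODATA_START.saturating_sub(1) {
        MM_RODATA_START => "program",
        MM_STACK_START => "stack",
        MM_HEAP_START => "heap",
        MM_INPUT_START => "input",
        _ => "unknown",
    }
}

fn main() {
    assert_eq!(region_name(MM_RODATA_START + 0x20), "program");
    assert_eq!(region_name(MM_STACK_START + 0x123), "stack");
}
```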
@@ -1627,7 +1627,7 @@ mod test {
         let mem3 = [33, 33];
         let mut m = AlignedMemoryMapping::new(
             vec![
-                MemoryRegion::new_readonly(&mem1, ebpf::MM_PROGRAM_START),
+                MemoryRegion::new_readonly(&mem1, ebpf::MM_RODATA_START),
                 MemoryRegion::new_readonly(&mem2, ebpf::MM_STACK_START),
             ],
             &config,
@@ -1682,7 +1682,7 @@ mod test {
         let c = Rc::clone(&copied);
 
         let m = MemoryMapping::new_with_cow(
-            vec![MemoryRegion::new_cow(&original, ebpf::MM_PROGRAM_START, 42)],
+            vec![MemoryRegion::new_cow(&original, ebpf::MM_RODATA_START, 42)],
             Box::new(move |_| {
                 c.borrow_mut().extend_from_slice(&original);
                 Ok(c.borrow().as_slice().as_ptr() as u64)
@@ -1693,11 +1693,11 @@ mod test {
             }),
             &config,
             &SBPFVersion::V2,
         )
         .unwrap();
 
         assert_eq!(
-            m.map(AccessType::Load, ebpf::MM_PROGRAM_START, 1).unwrap(),
+            m.map(AccessType::Load, ebpf::MM_RODATA_START, 1).unwrap(),
             original.as_ptr() as u64
         );
         assert_eq!(
-            m.map(AccessType::Store, ebpf::MM_PROGRAM_START, 1).unwrap(),
+            m.map(AccessType::Store, ebpf::MM_RODATA_START, 1).unwrap(),
             copied.borrow().as_ptr() as u64
         );
     }
@@ -1715,7 +1715,7 @@ mod test {
         let c = Rc::clone(&copied);
 
         let m = MemoryMapping::new_with_cow(
-            vec![MemoryRegion::new_cow(&original, ebpf::MM_PROGRAM_START, 42)],
+            vec![MemoryRegion::new_cow(&original, ebpf::MM_RODATA_START, 42)],
             Box::new(move |_| {
                 c.borrow_mut().extend_from_slice(&original);
                 Ok(c.borrow().as_slice().as_ptr() as u64)
@@ -1726,18 +1726,18 @@ mod test {
         .unwrap();
 
         assert_eq!(
-            m.map(AccessType::Load, ebpf::MM_PROGRAM_START, 1).unwrap(),
+            m.map(AccessType::Load, ebpf::MM_RODATA_START, 1).unwrap(),
             original.as_ptr() as u64
         );
 
-        assert_eq!(m.load::<u8>(ebpf::MM_PROGRAM_START).unwrap(), 11);
-        assert_eq!(m.load::<u8>(ebpf::MM_PROGRAM_START + 1).unwrap(), 22);
+        assert_eq!(m.load::<u8>(ebpf::MM_RODATA_START).unwrap(), 11);
+        assert_eq!(m.load::<u8>(ebpf::MM_RODATA_START + 1).unwrap(), 22);
         assert!(copied.borrow().is_empty());
 
-        m.store(33u8, ebpf::MM_PROGRAM_START).unwrap();
+        m.store(33u8, ebpf::MM_RODATA_START).unwrap();
         assert_eq!(original[0], 11);
-        assert_eq!(m.load::<u8>(ebpf::MM_PROGRAM_START).unwrap(), 33);
-        assert_eq!(m.load::<u8>(ebpf::MM_PROGRAM_START + 1).unwrap(), 22);
+        assert_eq!(m.load::<u8>(ebpf::MM_RODATA_START).unwrap(), 33);
+        assert_eq!(m.load::<u8>(ebpf::MM_RODATA_START + 1).unwrap(), 22);
     }
 }
@@ -1755,8 +1755,8 @@ mod test {
         let c = Rc::clone(&copied);
         let m = MemoryMapping::new_with_cow(
             vec![
-                MemoryRegion::new_cow(&original1, ebpf::MM_PROGRAM_START, 42),
-                MemoryRegion::new_cow(&original2, ebpf::MM_PROGRAM_START + 0x100000000, 24),
+                MemoryRegion::new_cow(&original1, ebpf::MM_RODATA_START, 42),
+                MemoryRegion::new_cow(&original2, ebpf::MM_RODATA_START + 0x100000000, 24),
             ],
             Box::new(move |id| {
                 // check that the argument passed to MemoryRegion::new_cow is then passed to the
@@ -1770,9 +1770,9 @@ mod test {
         )
         .unwrap();
 
-        m.store(55u8, ebpf::MM_PROGRAM_START).unwrap();
+        m.store(55u8, ebpf::MM_RODATA_START).unwrap();
         assert_eq!(original1[0], 11);
-        assert_eq!(m.load::<u8>(ebpf::MM_PROGRAM_START).unwrap(), 55);
+        assert_eq!(m.load::<u8>(ebpf::MM_RODATA_START).unwrap(), 55);
     }
 }
 
@@ -1783,14 +1783,14 @@ mod test {
 
         let original = [11, 22];
         let m = MemoryMapping::new_with_cow(
-            vec![MemoryRegion::new_cow(&original, ebpf::MM_PROGRAM_START, 42)],
+            vec![MemoryRegion::new_cow(&original, ebpf::MM_RODATA_START, 42)],
             Box::new(|_| Err(())),
             &config,
             &SBPFVersion::V2,
         )
         .unwrap();
 
-        m.map(AccessType::Store, ebpf::MM_PROGRAM_START, 1).unwrap();
+        m.map(AccessType::Store, ebpf::MM_RODATA_START, 1).unwrap();
     }
 
 #[test]
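The CoW tests above pin down the copy-on-write contract: loads hit the original buffer until the first store triggers exactly one copy, after which all accesses go to it. A toy model of that state machine (not rbpf's MemoryRegion/MemoryMapping API):

```rust
use std::cell::RefCell;

struct CowRegion<'a> {
    original: &'a [u8],
    copy: RefCell<Option<Vec<u8>>>,
}

impl<'a> CowRegion<'a> {
    fn load(&self, offset: usize) -> u8 {
        match self.copy.borrow().as_ref() {
            Some(copy) => copy[offset],
            None => self.original[offset],
        }
    }

    fn store(&self, value: u8, offset: usize) {
        // First write materializes the private copy (the `Box::new(move |_| ...)`
        // callback in the tests plays this role).
        let mut copy = self.copy.borrow_mut();
        copy.get_or_insert_with(|| self.original.to_vec())[offset] = value;
    }
}

fn main() {
    let original = [11u8, 22];
    let region = CowRegion { original: &original, copy: RefCell::new(None) };
    assert_eq!(region.load(0), 11); // reads pass through before any write
    region.store(33, 0);
    assert_eq!(region.load(0), 33); // subsequent reads see the copy
    assert_eq!(original[0], 11); // the original is untouched
}
```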
@@ -1800,13 +1800,13 @@ mod test {
 
         let original = [11, 22];
         let m = MemoryMapping::new_with_cow(
-            vec![MemoryRegion::new_cow(&original, ebpf::MM_PROGRAM_START, 42)],
+            vec![MemoryRegion::new_cow(&original, ebpf::MM_RODATA_START, 42)],
             Box::new(|_| Err(())),
             &config,
             &SBPFVersion::V2,
         )
         .unwrap();
 
-        m.store(33u8, ebpf::MM_PROGRAM_START).unwrap();
+        m.store(33u8, ebpf::MM_RODATA_START).unwrap();
     }
 }
diff --git a/src/program.rs b/src/program.rs
index 1f817c02..3c844317 100644
--- a/src/program.rs
+++ b/src/program.rs
@@ -61,12 +61,16 @@ impl SBPFVersion {
         self != &SBPFVersion::V1
     }
 
-    /// Allow sh_addr != sh_offset in elf sections. Used in V2 to align
-    /// section vaddrs to MM_PROGRAM_START.
+    /// Allow sh_addr != sh_offset in elf sections.
     pub fn enable_elf_vaddr(&self) -> bool {
         self != &SBPFVersion::V1
     }
 
+    /// Separates the bytecode from the read only data in virtual address space
+    pub fn enable_lower_bytecode_vaddr(&self) -> bool {
+        self != &SBPFVersion::V1
+    }
+
     /// Use dynamic stack frame sizes
     pub fn dynamic_stack_frames(&self) -> bool {
         self != &SBPFVersion::V1
diff --git a/tests/execution.rs b/tests/execution.rs
index cf63966b..760078d7 100644
--- a/tests/execution.rs
+++ b/tests/execution.rs
@@ -2389,9 +2389,7 @@ fn test_callx() {
     test_interpreter_and_jit_asm!(
         "
         mov64 r0, 0x0
-        mov64 r8, 0x1
-        lsh64 r8, 0x20
-        or64 r8, 0x30
+        or64 r8, 0x20
         callx r8
         exit
 function_foo:
@@ -2399,7 +2397,7 @@
         exit",
         [],
         (),
-        TestContextObject::new(8),
+        TestContextObject::new(6),
         ProgramResult::Ok(42),
     );
 }
@@ -2409,27 +2407,30 @@ fn test_err_callx_unregistered() {
     test_interpreter_and_jit_asm!(
         "
         mov64 r0, 0x0
-        mov64 r8, 0x1
-        lsh64 r8, 0x20
-        or64 r8, 0x30
+        or64 r8, 0x20
         callx r8
         exit
         mov64 r0, 0x2A
         exit",
         [],
         (),
-        TestContextObject::new(6),
+        TestContextObject::new(4),
         ProgramResult::Err(EbpfError::UnsupportedInstruction),
     );
 }
 
 #[test]
 fn test_err_callx_oob_low() {
+    let config = Config {
+        enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1,
+        ..Config::default()
+    };
     test_interpreter_and_jit_asm!(
         "
         mov64 r0, 0x3
         callx r0
         exit",
+        config,
         [],
         (),
         TestContextObject::new(2),
@@ -2518,14 +2519,12 @@ fn test_err_reg_stack_depth() {
     };
     test_interpreter_and_jit_asm!(
         "
-        mov64 r0, 0x1
-        lsh64 r0, 0x20
         callx r0
         exit",
         config,
         [],
         (),
-        TestContextObject::new(max_call_depth as u64 * 3),
+        TestContextObject::new(max_call_depth as u64),
         ProgramResult::Err(EbpfError::CallDepthExceeded),
     );
 }
@@ -2765,9 +2764,7 @@ fn test_tight_infinite_recursion() {
 fn test_tight_infinite_recursion_callx() {
     test_interpreter_and_jit_asm!(
         "
-        mov64 r8, 0x1
-        lsh64 r8, 0x20
-        or64 r8, 0x28
+        or64 r8, 0x18
         call function_foo
         exit
 function_foo:
@@ -2776,7 +2773,7 @@
         exit",
         [],
         (),
-        TestContextObject::new(8),
+        TestContextObject::new(6),
         ProgramResult::Err(EbpfError::ExceededMaxInstructions),
     );
 }
@@ -2815,27 +2812,6 @@ fn test_err_instruction_count_syscall_capped() {
     );
 }
 
-#[test]
-fn test_non_terminate_early() {
-    test_interpreter_and_jit_asm!(
-        "
-        mov64 r6, 0x0
-        mov64 r1, 0x0
-        mov64 r2, 0x0
-        mov64 r3, 0x0
-        mov64 r4, 0x0
-        mov64 r5, r6
-        callx r6
-        add64 r6, 0x1
-        ja -0x8
-        exit",
-        [],
-        (),
-        TestContextObject::new(7),
-        ProgramResult::Err(EbpfError::CallOutsideTextSegment),
-    );
-}
-
 #[test]
 fn test_err_non_terminate_capped() {
     test_interpreter_and_jit_asm!(
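The shortened test assembly follows directly from the bytecode region moving to vaddr 0: a callx target now fits in a single 32-bit `or64` immediate, whereas under SBPFv1 the same target sat above MM_RODATA_START and needed a mov64/lsh64/or64 sequence to build (hence the lower instruction counts in TestContextObject). A sketch of the arithmetic; constants mirror src/ebpf.rs, and the helper is illustrative, not an rbpf API:

```rust
const MM_BYTECODE_START: u64 = 0;
const MM_RODATA_START: u64 = 1 << 32;
const INSN_SIZE: u64 = 8; // one eBPF instruction slot

// Virtual address of instruction `insn_index`, under each layout.
fn callx_target(insn_index: u64, lower_bytecode_vaddr: bool) -> u64 {
    let base = if lower_bytecode_vaddr { MM_BYTECODE_START } else { MM_RODATA_START };
    base + insn_index * INSN_SIZE
}

fn main() {
    // function_foo is the 5th instruction (index 4) in the updated test_callx.
    assert_eq!(callx_target(4, true), 0x20); // new: `or64 r8, 0x20` suffices
    assert_eq!(callx_target(6, false), 0x1_0000_0030); // old: mov64+lsh64+or64 built this
}
```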
@@ -2896,7 +2872,7 @@ fn test_err_capped_before_exception() {
     test_interpreter_and_jit_asm!(
         "
         mov64 r1, 0x0
-        hor64 r2, 0x1
+        mov64 r2, 0x0
         callx r2
         mov64 r0, 0x0
         exit",
@@ -2911,25 +2887,21 @@ fn test_err_exit_capped() {
     test_interpreter_and_jit_asm!(
         "
-        mov64 r1, 0x1
-        lsh64 r1, 0x20
-        or64 r1, 0x28
-        callx r1
+        or64 r0, 0x18
+        callx r0
         exit
 function_foo:
         exit
         ",
         [],
         (),
-        TestContextObject::new(5),
+        TestContextObject::new(3),
         ProgramResult::Err(EbpfError::ExceededMaxInstructions),
     );
     test_interpreter_and_jit_asm!(
         "
-        mov64 r1, 0x1
-        lsh64 r1, 0x20
-        or64 r1, 0x28
-        callx r1
+        or64 r0, 0x18
+        callx r0
         exit
 function_foo:
         mov r0, r0
@@ -2937,7 +2909,7 @@
         exit",
         [],
         (),
-        TestContextObject::new(6),
+        TestContextObject::new(4),
         ProgramResult::Err(EbpfError::ExceededMaxInstructions),
     );
     test_interpreter_and_jit_asm!(
@@ -2966,13 +2938,12 @@ fn test_far_jumps() {
         .fill 1024, 0x0F
         exit
 function_c:
-        mov32 r1, 0x00000010
-        hor64 r1, 0x00000001
+        mov32 r1, 0x10
         callx r1
         exit",
         [],
         (),
-        TestContextObject::new(7),
+        TestContextObject::new(6),
         ProgramResult::Ok(0),
     );
 }
@@ -3050,7 +3021,7 @@ fn test_reloc_64_64_sbpfv1() {
         [],
         (),
         TestContextObject::new(2),
-        ProgramResult::Ok(ebpf::MM_PROGRAM_START + 0x120),
+        ProgramResult::Ok(ebpf::MM_RODATA_START + 0x120),
     );
 }
@@ -3070,7 +3041,7 @@ fn test_reloc_64_relative_sbpfv1() {
         [],
         (),
         TestContextObject::new(2),
-        ProgramResult::Ok(ebpf::MM_PROGRAM_START + 0x138),
+        ProgramResult::Ok(ebpf::MM_RODATA_START + 0x138),
     );
 }
@@ -3093,7 +3064,7 @@ fn test_reloc_64_relative_data_sbfv1() {
         [],
         (),
         TestContextObject::new(3),
-        ProgramResult::Ok(ebpf::MM_PROGRAM_START + 0x108),
+        ProgramResult::Ok(ebpf::MM_RODATA_START + 0x108),
     );
 }
@@ -3122,7 +3093,7 @@ fn test_reloc_64_relative_data_sbpfv1() {
         [],
         (),
         TestContextObject::new(3),
-        ProgramResult::Ok(ebpf::MM_PROGRAM_START + 0x108),
+        ProgramResult::Ok(ebpf::MM_RODATA_START + 0x108),
     );
 }
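The updated sbpfv1 reloc expectations follow the same rebase rule the elf.rs hunks apply: a 64-bit value stored in the ELF below MM_RODATA_START is rooted into the rodata region, so the programs now return MM_RODATA_START + 0x120, + 0x138, and + 0x108. A standalone sketch of that in-place fixup (not the rbpf relocation code, which goes through LittleEndian::read_u64/write):

```rust
const MM_RODATA_START: u64 = 1 << 32; // mirrors src/ebpf.rs

// Rebase a little-endian 64-bit pointer stored in the ELF image.
fn relocate_in_place(word: &mut [u8; 8]) {
    let mut addr = u64::from_le_bytes(*word);
    if addr < MM_RODATA_START {
        // Not yet placed by the linker; root it at MM_RODATA_START now.
        addr = MM_RODATA_START.saturating_add(addr);
    }
    *word = addr.to_le_bytes();
}

fn main() {
    let mut word = 0x120u64.to_le_bytes();
    relocate_in_place(&mut word);
    assert_eq!(u64::from_le_bytes(word), MM_RODATA_START + 0x120);
    // Already-placed addresses are left untouched.
    relocate_in_place(&mut word);
    assert_eq!(u64::from_le_bytes(word), MM_RODATA_START + 0x120);
}
```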