diff --git a/benches/elf_loader.rs b/benches/elf_loader.rs index 323be403..656c794a 100644 --- a/benches/elf_loader.rs +++ b/benches/elf_loader.rs @@ -31,8 +31,8 @@ fn loader() -> Arc> { } #[bench] -fn bench_load_sbpfv1(bencher: &mut Bencher) { - let mut file = File::open("tests/elfs/syscall_reloc_64_32_sbpfv1.so").unwrap(); +fn bench_load_sbpfv0(bencher: &mut Bencher) { + let mut file = File::open("tests/elfs/syscall_reloc_64_32_sbpfv0.so").unwrap(); let mut elf = Vec::new(); file.read_to_end(&mut elf).unwrap(); let loader = loader(); diff --git a/benches/jit_compile.rs b/benches/jit_compile.rs index 48b22484..cbc7a072 100644 --- a/benches/jit_compile.rs +++ b/benches/jit_compile.rs @@ -18,7 +18,7 @@ use test_utils::create_vm; #[bench] fn bench_init_vm(bencher: &mut Bencher) { - let mut file = File::open("tests/elfs/relative_call_sbpfv1.so").unwrap(); + let mut file = File::open("tests/elfs/relative_call_sbpfv0.so").unwrap(); let mut elf = Vec::new(); file.read_to_end(&mut elf).unwrap(); let executable = @@ -42,7 +42,7 @@ fn bench_init_vm(bencher: &mut Bencher) { #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] #[bench] fn bench_jit_compile(bencher: &mut Bencher) { - let mut file = File::open("tests/elfs/relative_call_sbpfv1.so").unwrap(); + let mut file = File::open("tests/elfs/relative_call_sbpfv0.so").unwrap(); let mut elf = Vec::new(); file.read_to_end(&mut elf).unwrap(); let mut executable = diff --git a/benches/memory_mapping.rs b/benches/memory_mapping.rs index 1d78b708..91e77dee 100644 --- a/benches/memory_mapping.rs +++ b/benches/memory_mapping.rs @@ -73,7 +73,7 @@ macro_rules! bench_gapped_randomized_access_with_1024_entries { )]; let config = Config::default(); let memory_mapping = - $mem::new(memory_regions, &config, SBPFVersion::V2).unwrap(); + $mem::new(memory_regions, &config, SBPFVersion::V3).unwrap(); let mut prng = new_prng!(); bencher.iter(|| { assert!(memory_mapping @@ -111,7 +111,7 @@ macro_rules! 
bench_randomized_access_with_0001_entry { let content = vec![0; 1024 * 2]; let memory_regions = vec![MemoryRegion::new_readonly(&content[..], 0x100000000)]; let config = Config::default(); - let memory_mapping = $mem::new(memory_regions, &config, SBPFVersion::V2).unwrap(); + let memory_mapping = $mem::new(memory_regions, &config, SBPFVersion::V3).unwrap(); let mut prng = new_prng!(); bencher.iter(|| { let _ = memory_mapping.map( @@ -145,7 +145,7 @@ macro_rules! bench_randomized_access_with_n_entries { let (memory_regions, end_address) = generate_memory_regions($n, MemoryState::Readable, Some(&mut prng)); let config = Config::default(); - let memory_mapping = $mem::new(memory_regions, &config, SBPFVersion::V2).unwrap(); + let memory_mapping = $mem::new(memory_regions, &config, SBPFVersion::V3).unwrap(); bencher.iter(|| { let _ = memory_mapping.map( AccessType::Load, @@ -194,7 +194,7 @@ macro_rules! bench_randomized_mapping_with_n_entries { let (memory_regions, _end_address) = generate_memory_regions($n, MemoryState::Readable, Some(&mut prng)); let config = Config::default(); - let memory_mapping = $mem::new(memory_regions, &config, SBPFVersion::V2).unwrap(); + let memory_mapping = $mem::new(memory_regions, &config, SBPFVersion::V3).unwrap(); bencher.iter(|| { let _ = memory_mapping.map(AccessType::Load, 0x100000000, 1); }); @@ -243,7 +243,7 @@ macro_rules! 
bench_mapping_with_n_entries { let (memory_regions, _end_address) = generate_memory_regions($n, MemoryState::Readable, None); let config = Config::default(); - let memory_mapping = $mem::new(memory_regions, &config, SBPFVersion::V2).unwrap(); + let memory_mapping = $mem::new(memory_regions, &config, SBPFVersion::V3).unwrap(); bencher.iter(|| { let _ = memory_mapping.map(AccessType::Load, 0x100000000, 1); }); @@ -301,7 +301,7 @@ fn do_bench_mapping_operation(bencher: &mut Bencher, op: MemoryOperation, vm_add MemoryRegion::new_writable(&mut mem2, 0x100000000 + 8), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); diff --git a/benches/vm_execution.rs b/benches/vm_execution.rs index 4404c927..aecac29f 100644 --- a/benches/vm_execution.rs +++ b/benches/vm_execution.rs @@ -25,7 +25,7 @@ use test_utils::create_vm; #[bench] fn bench_init_interpreter_start(bencher: &mut Bencher) { - let mut file = File::open("tests/elfs/rodata_section_sbpfv1.so").unwrap(); + let mut file = File::open("tests/elfs/rodata_section_sbpfv0.so").unwrap(); let mut elf = Vec::new(); file.read_to_end(&mut elf).unwrap(); let executable = @@ -51,7 +51,7 @@ fn bench_init_interpreter_start(bencher: &mut Bencher) { #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] #[bench] fn bench_init_jit_start(bencher: &mut Bencher) { - let mut file = File::open("tests/elfs/rodata_section_sbpfv1.so").unwrap(); + let mut file = File::open("tests/elfs/rodata_section_sbpfv0.so").unwrap(); let mut elf = Vec::new(); file.read_to_end(&mut elf).unwrap(); let mut executable = @@ -171,7 +171,7 @@ fn bench_jit_vs_interpreter_address_translation_stack_fixed(bencher: &mut Benche bencher, ADDRESS_TRANSLATION_STACK_CODE, Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }, 524289, @@ -185,10 +185,7 @@ fn bench_jit_vs_interpreter_address_translation_stack_dynamic(bencher: &mut Benc 
bench_jit_vs_interpreter( bencher, ADDRESS_TRANSLATION_STACK_CODE, - Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V2, - ..Config::default() - }, + Config::default(), 524289, &mut [], ); @@ -233,7 +230,7 @@ fn bench_jit_vs_interpreter_call_depth_fixed(bencher: &mut Bencher) { call function_foo exit", Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }, 137218, @@ -263,10 +260,7 @@ fn bench_jit_vs_interpreter_call_depth_dynamic(bencher: &mut Bencher) { call function_foo add r11, 4 exit", - Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V2, - ..Config::default() - }, + Config::default(), 176130, &mut [], ); diff --git a/doc/bytecode.md b/doc/bytecode.md index 8ef14afb..1fd8c679 100644 --- a/doc/bytecode.md +++ b/doc/bytecode.md @@ -19,7 +19,7 @@ All of them are 64 bit wide. | `r8` | all | GPR | Call-preserved | `r9` | all | GPR | Call-preserved | `r10` | all | Frame pointer | System register -| `r11` | from v2 | Stack pointer | System register +| `r11` | from v1 | Stack pointer | System register | `pc` | all | Program counter | Hidden register @@ -78,7 +78,7 @@ The following Rust equivalents assume that: - `imm` is `u32` - `off` is `u16` -### 32 bit Arithmetic and Logic +### Memory Load or 32 bit Arithmetic and Logic | opcode (hex / bin) | feature set | assembler mnemonic | Rust equivalent | ------------------ | ----------- | ---------------------- | --------------- | `04` / `00000100` | until v2 | `add32 dst, imm` | `dst = (dst as u32).wrapping_add(imm) as i32 as i64 as u64` @@ -90,9 +90,13 @@ The following Rust equivalents assume that: | `1C` / `00011100` | until v2 | `sub32 dst, src` | `dst = (dst as u32).wrapping_sub(src as u32) as i32 as i64 as u64` | `1C` / `00011100` | from v2 | `sub32 dst, src` | `dst = (dst as u32).wrapping_sub(src as u32) as u64` | `24` / `00100100` | until v2 | `mul32 dst, imm` | `dst = (dst as 
i32).wrapping_mul(imm as i32) as i64 as u64` +| `24` / `00100100` | from v2 | -- reserved -- | `2C` / `00101100` | until v2 | `mul32 dst, src` | `dst = (dst as i32).wrapping_mul(src as i32) as i64 as u64` +| `2C` / `00101100` | from v2 | `ldxb dst, [src + off]` | `34` / `00110100` | until v2 | `div32 dst, imm` | `dst = ((dst as u32) / imm) as u64` +| `34` / `00110100` | from v2 | -- reserved -- | `3C` / `00111100` | until v2 | `div32 dst, src` | `dst = ((dst as u32) / (src as u32)) as u64` +| `3C` / `00111100` | from v2 | `ldxh dst, [src + off]` | `44` / `01000100` | all | `or32 dst, imm` | `dst = (dst as u32).or(imm) as u64` | `4C` / `01001100` | all | `or32 dst, src` | `dst = (dst as u32).or(src as u32) as u64` | `54` / `01010100` | all | `and32 dst, imm` | `dst = (dst as u32).and(imm) as u64` @@ -102,9 +106,13 @@ The following Rust equivalents assume that: | `74` / `01110100` | all | `rsh32 dst, imm` | `dst = (dst as u32).wrapping_shr(imm) as u64` | `7C` / `01111100` | all | `rsh32 dst, src` | `dst = (dst as u32).wrapping_shr(src as u32) as u64` | `84` / `10000100` | until v2 | `neg32 dst` | `dst = (dst as i32).wrapping_neg() as u32 as u64` -| `8C` / `10001100` | | -- reserved -- +| `84` / `10000100` | from v2 | -- reserved -- +| `8C` / `10001100` | until v2 | -- reserved -- +| `8C` / `10001100` | from v2 | `ldxw dst, [src + off]` | `94` / `10010100` | until v2 | `mod32 dst, imm` | `dst = ((dst as u32) % imm) as u64` +| `94` / `10010100` | from v2 | -- reserved -- | `9C` / `10011100` | until v2 | `mod32 dst, src` | `dst = ((dst as u32) % (src as u32)) as u64` +| `9C` / `10011100` | from v2 | `ldxdw dst, [src + off]` | `A4` / `10100100` | all | `xor32 dst, imm` | `dst = (dst as u32).xor(imm) as u64` | `AC` / `10101100` | all | `xor32 dst, src` | `dst = (dst as u32).xor(src as u32) as u64` | `B4` / `10110100` | all | `mov32 dst, imm` | `dst = imm as i32 as i64 as u64` @@ -113,10 +121,11 @@ The following Rust equivalents assume that: | `C4` / `11000100` | all | 
`ash32 dst, imm` | `dst = (dst as i32).wrapping_shr(imm) as u32 as u64` | `CC` / `11001100` | all | `ash32 dst, src` | `dst = (dst as i32).wrapping_shr(src as u32) as u32 as u64` | `D4` / `11010100` | until v2 | `le dst, imm` | `dst = dst as u32 as u64` +| `D4` / `11010100` | from v2 | -- reserved -- | `DC` / `11011100` | all | `be dst, imm` | `dst = match imm { 16 => (dst as u16).swap_bytes() as u64, 32 => (dst as u32).swap_bytes() as u64, 64 => dst.swap_bytes() }` -| `E4` to `FC` | | -- reserved -- +| `E4` to `FC` | all | -- reserved -- -### 64 bit Arithmetic and Logic +### Memory Store or 64 bit Arithmetic and Logic | opcode (hex / bin) | feature set | assembler mnemonic | Rust equivalent | ------------------ | ----------- | ------------------ | --------------- | `07` / `00000111` | all | `add64 dst, imm` | `dst = dst.wrapping_add(imm as i32 as i64 as u64)` @@ -125,9 +134,13 @@ The following Rust equivalents assume that: | `17` / `00010111` | from v2 | `sub64 dst, imm` | `dst = (imm as i32 as i64 as u64).wrapping_sub(dst)` | `1F` / `00011111` | all | `sub64 dst, src` | `dst = dst.wrapping_sub(src)` | `27` / `00100111` | until v2 | `mul64 dst, imm` | `dst = dst.wrapping_mul(imm as u64)` +| `27` / `00100111` | from v2 | `stb [dst + off], imm` | `2F` / `00101111` | until v2 | `mul64 dst, src` | `dst = dst.wrapping_mul(src)` +| `2F` / `00101111` | from v2 | `stxb [dst + off], src` | `37` / `00110111` | until v2 | `div64 dst, imm` | `dst = dst / (imm as u64)` +| `37` / `00110111` | from v2 | `sth [dst + off], imm` | `3F` / `00111111` | until v2 | `div64 dst, src` | `dst = dst / src` +| `3F` / `00111111` | from v2 | `stxh [dst + off], src` | `47` / `01000111` | all | `or64 dst, imm` | `dst = dst.or(imm)` | `4F` / `01001111` | all | `or64 dst, src` | `dst = dst.or(src)` | `57` / `01010111` | all | `and64 dst, imm` | `dst = dst.and(imm)` @@ -137,18 +150,23 @@ The following Rust equivalents assume that: | `77` / `01110111` | all | `rsh64 dst, imm` | `dst = 
dst.wrapping_shr(imm)` | `7F` / `01111111` | all | `rsh64 dst, src` | `dst = dst.wrapping_shr(src as u32)` | `87` / `10000111` | until v2 | `neg64 dst` | `dst = (dst as i64).wrapping_neg() as u64` -| `8F` / `10001111` | | -- reserved -- +| `87` / `10000111` | from v2 | `stw [dst + off], imm` +| `8F` / `10001111` | until v2 | -- reserved -- +| `8F` / `10001111` | from v2 | `stxw [dst + off], src` | `97` / `10010111` | until v2 | `mod64 dst, imm` | `dst = dst % (imm as u64)` +| `97` / `10010111` | from v2 | `stdw [dst + off], imm` | `9F` / `10011111` | until v2 | `mod64 dst, src` | `dst = dst % src` +| `9F` / `10011111` | from v2 | `stxdw [dst + off], src` | `A7` / `10100111` | all | `xor64 dst, imm` | `dst = dst.xor(imm)` | `AF` / `10101111` | all | `xor64 dst, src` | `dst = dst.xor(src)` | `B7` / `10110111` | all | `mov64 dst, imm` | `dst = imm as u64` | `BF` / `10111111` | all | `mov64 dst, src` | `dst = src` | `C7` / `11000111` | all | `ash64 dst, imm` | `dst = (dst as i64).wrapping_shr(imm)` | `CF` / `11001111` | all | `ash64 dst, src` | `dst = (dst as i64).wrapping_shr(src as u32)` -| `D7` to `EF` | | -- reserved -- +| `D7` to `EF` | all | -- reserved -- +| `F7` / `11110111` | until v2 | -- reserved -- | `F7` / `11110111` | from v2 | `hor64 dst, imm` | `dst = dst.or((imm as u64).wrapping_shl(32))` -| `FF` / `11111111` | | -- reserved -- +| `FF` / `11111111` | all | -- reserved -- ### Product / Quotient / Remainder | bit index | when `0` | when `1` @@ -165,7 +183,7 @@ The following Rust equivalents assume that: | opcode (hex / bin) | feature set | assembler mnemonic | Rust equivalent | ------------------ | ----------- | ------------------ | --------------- -| `06` to `2E` | | -- reserved -- +| `06` to `2E` | all | -- reserved -- | `36` / `00110110` | from v2 | `uhmul64 dst, imm` | `dst = (dst as u128).wrapping_mul(imm as u128).wrapping_shr(64) as u64` | `3E` / `00111110` | from v2 | `uhmul64 dst, src` | `dst = (dst as u128).wrapping_mul(src as 
u128).wrapping_shr(64) as u64` | `46` / `01000110` | from v2 | `udiv32 dst, imm` | `dst = ((dst as u32) / imm) as u64` @@ -180,7 +198,7 @@ The following Rust equivalents assume that: | `8E` / `10001110` | from v2 | `lmul32 dst, src` | `dst = (dst as i32).wrapping_mul(src as i32) as u32 as u64` | `96` / `10010110` | from v2 | `lmul64 dst, imm` | `dst = dst.wrapping_mul(imm as u64)` | `9E` / `10011110` | from v2 | `lmul64 dst, src` | `dst = dst.wrapping_mul(src)` -| `A6` to `AE` | | -- reserved -- +| `A6` to `AE` | all | -- reserved -- | `B6` / `10110110` | from v2 | `shmul64 dst, imm` | `dst = (dst as i128).wrapping_mul(imm as i32 as i128).wrapping_shr(64) as i64 as u64` | `BE` / `10111110` | from v2 | `shmul64 dst, src` | `dst = (dst as i128).wrapping_mul(src as i64 as i128).wrapping_shr(64) as i64 as u64` | `C6` / `11000110` | from v2 | `sdiv32 dst, imm` | `dst = ((dst as i32) / (imm as i32)) as u32 as u64` @@ -192,7 +210,7 @@ The following Rust equivalents assume that: | `F6` / `11110110` | from v2 | `srem64 dst, imm` | `dst = ((dst as i64) % (imm as i64)) as u64` | `FE` / `11111110` | from v2 | `srem64 dst, src` | `dst = ((dst as i64) % (src as i64)) as u64` -### Memory +### Deprecated Memory Load and Store #### Panics - Out of bounds: When the memory location is not mapped. 
@@ -201,36 +219,36 @@ The following Rust equivalents assume that: | opcode (hex / bin) | feature set | assembler mnemonic | Rust equivalent | ------------------ | ----------- | ------------------ | --------------- | `00` / `00000000` | until v2 | `lddw dst, imm` | `dst = dst.or((imm as u64).wrapping_shl(32))` -| `08` to `10` | | -- reserved -- +| `08` to `10` | all | -- reserved -- | `18` / `00011000` | until v2 | `lddw dst, imm` | `dst = imm as u64` -| `20` to `F8` | | -- reserved -- +| `20` to `F8` | all | -- reserved -- | opcode (hex / bin) | feature set | assembler mnemonic | ------------------ | ----------- | ------------------ -| `01` to `59` | | -- reserved -- -| `61` / `01100001` | all | `ldxw dst, [src + off]` -| `69` / `01101001` | all | `ldxh dst, [src + off]` -| `71` / `01110001` | all | `ldxb dst, [src + off]` -| `79` / `01111001` | all | `ldxdw dst, [src + off]` -| `81` to `F9` | | -- reserved -- +| `01` to `59` | all | -- reserved -- +| `61` / `01100001` | until v2 | `ldxw dst, [src + off]` +| `69` / `01101001` | until v2 | `ldxh dst, [src + off]` +| `71` / `01110001` | until v2 | `ldxb dst, [src + off]` +| `79` / `01111001` | until v2 | `ldxdw dst, [src + off]` +| `81` to `F9` | all | -- reserved -- | opcode (hex / bin) | feature set | assembler mnemonic | ------------------ | ----------- | ------------------ -| `02` to `5A` | | -- reserved -- -| `62` / `01100010` | all | `stw [dst + off], imm` -| `6A` / `01101010` | all | `sth [dst + off], imm` -| `72` / `01110010` | all | `stb [dst + off], imm` -| `7A` / `01111010` | all | `stdw [dst + off], imm` -| `82` to `FA` | | -- reserved -- +| `02` to `5A` | all | -- reserved -- +| `62` / `01100010` | until v2 | `stw [dst + off], imm` +| `6A` / `01101010` | until v2 | `sth [dst + off], imm` +| `72` / `01110010` | until v2 | `stb [dst + off], imm` +| `7A` / `01111010` | until v2 | `stdw [dst + off], imm` +| `82` to `FA` | all | -- reserved -- | opcode (hex / bin) | feature set | assembler mnemonic | 
------------------ | ----------- | ------------------ -| `03` to `5B` | | -- reserved -- -| `63` / `01100011` | all | `stxw [dst + off], src` -| `6B` / `01101011` | all | `stxh [dst + off], src` -| `73` / `01110011` | all | `stxb [dst + off], src` -| `7B` / `01111011` | all | `stxdw [dst + off], src` -| `83` to `FB` | | -- reserved -- +| `03` to `5B` | all | -- reserved -- +| `63` / `01100011` | until v2 | `stxw [dst + off], src` +| `6B` / `01101011` | until v2 | `stxh [dst + off], src` +| `73` / `01110011` | until v2 | `stxb [dst + off], src` +| `7B` / `01111011` | until v2 | `stxdw [dst + off], src` +| `83` to `FB` | all | -- reserved -- ### Control Flow @@ -239,24 +257,24 @@ Except that the target location of `callx` is the src register, thus runtime dyn Call instructions (`call` and `callx` but not `syscall`) do: - Save the registers `r6`, `r7`, `r8`, `r9`, the frame pointer `r10` and the `pc` (pointing at the next instruction) -- If ≤ v1: Add one stack frame size to the frame pointer `r10` -- If ≥ v2: Move the stack pointer `r11` into the frame pointer `r10` +- If < v1: Add one stack frame size to the frame pointer `r10` +- If ≥ v1: Move the stack pointer `r11` into the frame pointer `r10` The `exit` (a.k.a. return) instruction does: - Restore the registers `r6`, `r7`, `r8`, `r9`, the frame pointer `r10` and the `pc` - Or gracefully terminate the program if there is no stack frame to restore #### Panics -- Out of bounds: When the target location is outside the bytecode if ≤ v1. -- Out of bounds: When the target location is outside the current function if ≥ v2 and a jump. -- Out of bounds: When the target location is not a registered function if ≥ v2 and a call. +- Out of bounds: When the target location is outside the bytecode if < v3. +- Out of bounds: When the target location is outside the current function if ≥ v3 and a jump. +- Out of bounds: When the target location is not a registered function if ≥ v3 and a call. 
- Second slot of `lddw`: When the target location has opcode `0x00`. - Stack overflow: When one too many nested call happens. | opcode (hex / bin) | feature set | assembler mnemonic | condition Rust equivalent | ------------------ | ----------- | -------------------- | ------------------------- | `05` / `00000101` | all | `ja off` | `true` -| `0D` / `00001101` | | -- reserved -- +| `0D` / `00001101` | all | -- reserved -- | `15` / `00010101` | all | `jeq dst, imm, off` | `dst == (imm as i32 as i64 as u64)` | `1D` / `00011101` | all | `jeq dst, src, off` | `dst == src` | `25` / `00100101` | all | `jgt dst, imm, off` | `dst > (imm as i32 as i64 as u64)` @@ -271,13 +289,14 @@ The `exit` (a.k.a. return) instruction does: | `6D` / `01101101` | all | `jsgt dst, src, off` | `(dst as i64) > (src as i64)` | `75` / `01110101` | all | `jsge dst, imm, off` | `(dst as i64) >= (imm as i32 as i64)` | `7D` / `01111101` | all | `jsge dst, src, off` | `(dst as i64) >= (src as i64)` -| `85` / `10000101` | until v2 | `call off` -| `85` / `10000101` | from v2 | `syscall src=0, off` -| `85` / `10000101` | from v2 | `call src=1, off` +| `85` / `10000101` | until v3 | `call imm` or `syscall imm` +| `85` / `10000101` | from v3 | `call off` | `8D` / `10001101` | until v2 | `callx imm` | `8D` / `10001101` | from v2 | `callx src` -| `95` / `10010101` | all | `exit` -| `9D` / `10011101` | | -- reserved -- +| `95` / `10010101` | until v3 | `exit` or `return` +| `95` / `10010101` | from v3 | `syscall imm` +| `9D` / `10011101` | until v3 | -- reserved -- +| `9D` / `10011101` | from v3 | `exit` or `return` | `A5` / `10100101` | all | `jlt dst, imm, off` | `dst < imm as i32 as i64 as u64` | `AD` / `10101101` | all | `jlt dst, src, off` | `dst < src` | `B5` / `10110101` | all | `jle dst, imm, off` | `dst <= imm as i32 as i64 as u64` @@ -286,7 +305,7 @@ The `exit` (a.k.a. 
return) instruction does: | `CD` / `11001101` | all | `jslt dst, src, off` | `(dst as i64) < (src as i64)` | `D5` / `11010101` | all | `jsle dst, imm, off` | `(dst as i64) <= (imm as i32 as i64)` | `DD` / `11011101` | all | `jsle dst, src, off` | `(dst as i64) <= (src as i64)` -| `E5` to `FD` | | -- reserved -- +| `E5` to `FD` | all | -- reserved -- Verification @@ -307,23 +326,34 @@ Verification - For all instructions the opcode must be valid - Memory write instructions can use `r10` as destination register +### until v1 +- No instruction can use `r11` as destination register + +### from v1 +- `add64 reg, imm` can use `r11` as destination register + ### until v2 - Opcodes from the product / quotient / remainder instruction class are forbiden +- `neg32` and `neg64` are allowed - `le` is allowed +- `lddw` (opcodes `0x18` and `0x00`) is allowed - `hor64` is forbidden - `callx` source register is encoded in the imm field -- The targets of `call` instructions is checked at runtime not verification time -- The offset of jump instructions must be limited to the range of the bytecode ### from v2 -- Every function must end in a `ja` or `exit` instruction -- `lddw` (opcodes `0x18` and `0x00`) are forbidden -- `neg32` and `neg64` are forbidden - Opcodes from the product / quotient / remainder instruction class are allowed +- `neg32` and `neg64` are forbidden - `le` is forbidden +- `lddw` (opcodes `0x18` and `0x00`) is forbidden - `hor64` is allowed -- The offset of jump instructions must be limited to the range of the current function - `callx` source register is encoded in the src field -- The targets of internal calls (`call` instructions with src ≠ 0) must have been registered at verification time -- The targets of syscalls (`call` instructions with src = 0) must have been registered at verification time -- `add64 reg, imm` can use `r11` as destination register + +### until v3 +- The targets of `call` instructions (which includes `syscall` instructions) are checked at 
runtime not verification time +- The offset of jump instructions must be limited to the range of the bytecode + +### from v3 +- Every function must end in a `ja` or `exit` instruction +- The targets of `call` instructions must have been registered at verification time +- The targets of `syscall` instructions must have been registered at verification time +- The offset of jump instructions must be limited to the range of the current function diff --git a/examples/disassemble.rs b/examples/disassemble.rs index 454fec2e..d668cbaf 100644 --- a/examples/disassemble.rs +++ b/examples/disassemble.rs @@ -35,7 +35,7 @@ fn main() { let executable = Executable::::from_text_bytes( program, loader, - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); diff --git a/examples/to_json.rs b/examples/to_json.rs index db2b35c5..cb6b1e2e 100644 --- a/examples/to_json.rs +++ b/examples/to_json.rs @@ -31,7 +31,7 @@ fn to_json(program: &[u8]) -> String { let executable = Executable::::from_text_bytes( program, Arc::new(BuiltinProgram::new_mock()), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); diff --git a/fuzz/fuzz_targets/dumb.rs b/fuzz/fuzz_targets/dumb.rs index f22eba76..ee624f5b 100644 --- a/fuzz/fuzz_targets/dumb.rs +++ b/fuzz/fuzz_targets/dumb.rs @@ -31,7 +31,7 @@ fuzz_target!(|data: DumbFuzzData| { let function_registry = FunctionRegistry::default(); let syscall_registry = FunctionRegistry::>::default(); - if RequisiteVerifier::verify(&prog, &config, SBPFVersion::V2, &function_registry, &syscall_registry).is_err() { + if RequisiteVerifier::verify(&prog, &config, SBPFVersion::V3, &function_registry, &syscall_registry).is_err() { // verify please return; } @@ -42,7 +42,7 @@ fuzz_target!(|data: DumbFuzzData| { config, FunctionRegistry::default(), )), - SBPFVersion::V2, + SBPFVersion::V3, function_registry, ) .unwrap(); diff --git a/fuzz/fuzz_targets/smart.rs b/fuzz/fuzz_targets/smart.rs index 1bd66e45..9232b0d3 100644 --- 
a/fuzz/fuzz_targets/smart.rs +++ b/fuzz/fuzz_targets/smart.rs @@ -38,7 +38,7 @@ fuzz_target!(|data: FuzzData| { if RequisiteVerifier::verify( prog.into_bytes(), &config, - SBPFVersion::V2, + SBPFVersion::V3, &function_registry, &syscall_registry, ) @@ -54,7 +54,7 @@ fuzz_target!(|data: FuzzData| { config, FunctionRegistry::default(), )), - SBPFVersion::V2, + SBPFVersion::V3, function_registry, ) .unwrap(); diff --git a/fuzz/fuzz_targets/smart_jit_diff.rs b/fuzz/fuzz_targets/smart_jit_diff.rs index 2bc1c30b..b77ab691 100644 --- a/fuzz/fuzz_targets/smart_jit_diff.rs +++ b/fuzz/fuzz_targets/smart_jit_diff.rs @@ -45,7 +45,7 @@ fuzz_target!(|data: FuzzData| { if RequisiteVerifier::verify( prog.into_bytes(), &config, - SBPFVersion::V2, + SBPFVersion::V3, &function_registry, &syscall_registry, ) @@ -62,7 +62,7 @@ fuzz_target!(|data: FuzzData| { config, FunctionRegistry::default(), )), - SBPFVersion::V2, + SBPFVersion::V3, function_registry, ) .unwrap(); diff --git a/fuzz/fuzz_targets/smarter_jit_diff.rs b/fuzz/fuzz_targets/smarter_jit_diff.rs index 31e84767..ebfa6e04 100644 --- a/fuzz/fuzz_targets/smarter_jit_diff.rs +++ b/fuzz/fuzz_targets/smarter_jit_diff.rs @@ -35,7 +35,7 @@ fuzz_target!(|data: FuzzData| { if RequisiteVerifier::verify( prog.into_bytes(), &config, - SBPFVersion::V2, + SBPFVersion::V3, &function_registry, &syscall_registry, ) @@ -52,7 +52,7 @@ fuzz_target!(|data: FuzzData| { config, FunctionRegistry::default(), )), - SBPFVersion::V2, + SBPFVersion::V3, function_registry, ) .unwrap(); diff --git a/fuzz/fuzz_targets/verify_semantic_aware.rs b/fuzz/fuzz_targets/verify_semantic_aware.rs index b06bd265..68c7d4b1 100644 --- a/fuzz/fuzz_targets/verify_semantic_aware.rs +++ b/fuzz/fuzz_targets/verify_semantic_aware.rs @@ -30,7 +30,7 @@ fuzz_target!(|data: FuzzData| { RequisiteVerifier::verify( prog.into_bytes(), &config, - SBPFVersion::V2, + SBPFVersion::V3, &function_registry, &syscall_registry, ) diff --git a/src/assembler.rs b/src/assembler.rs index 
30263794..40f58ba3 100644 --- a/src/assembler.rs +++ b/src/assembler.rs @@ -112,7 +112,7 @@ fn make_instruction_map(sbpf_version: SBPFVersion) -> HashMap HashMap( ebpf::LSH32_REG => { name = "lsh32"; desc = alu_reg_str(name, insn); }, ebpf::RSH32_IMM => { name = "rsh32"; desc = alu_imm_str(name, insn); }, ebpf::RSH32_REG => { name = "rsh32"; desc = alu_reg_str(name, insn); }, - ebpf::NEG32 if sbpf_version.enable_neg() => { name = "neg32"; desc = format!("{} r{}", name, insn.dst); }, + ebpf::NEG32 if !sbpf_version.disable_neg() => { name = "neg32"; desc = format!("{} r{}", name, insn.dst); }, ebpf::LD_4B_REG if sbpf_version.move_memory_instruction_classes() => { name = "ldxw"; desc = ld_reg_str(name, insn); }, ebpf::MOD32_IMM if !sbpf_version.enable_pqr() => { name = "mod32"; desc = alu_imm_str(name, insn); }, ebpf::MOD32_REG if !sbpf_version.enable_pqr() => { name = "mod32"; desc = alu_reg_str(name, insn); }, @@ -197,7 +197,7 @@ pub fn disassemble_instruction( ebpf::RSH64_IMM => { name = "rsh64"; desc = alu_imm_str(name, insn); }, ebpf::RSH64_REG => { name = "rsh64"; desc = alu_reg_str(name, insn); }, ebpf::ST_4B_IMM if sbpf_version.move_memory_instruction_classes() => { name = "stw"; desc = ld_st_imm_str(name, insn); }, - ebpf::NEG64 if sbpf_version.enable_neg() => { name = "neg64"; desc = format!("{} r{}", name, insn.dst); }, + ebpf::NEG64 if !sbpf_version.disable_neg() => { name = "neg64"; desc = format!("{} r{}", name, insn.dst); }, ebpf::ST_4B_REG if sbpf_version.move_memory_instruction_classes() => { name = "stxw"; desc = st_reg_str(name, insn); }, ebpf::MOD64_IMM if !sbpf_version.enable_pqr() => { name = "mod64"; desc = alu_imm_str(name, insn); }, ebpf::ST_8B_IMM if sbpf_version.move_memory_instruction_classes() => { name = "stdw"; desc = ld_st_imm_str(name, insn); }, diff --git a/src/ebpf.rs b/src/ebpf.rs index 81d929ef..2558b9e7 100644 --- a/src/ebpf.rs +++ b/src/ebpf.rs @@ -43,9 +43,9 @@ pub const VIRTUAL_ADDRESS_BITS: usize = 32; /// Size (and alignment) 
of a memory region pub const MM_REGION_SIZE: u64 = 1 << VIRTUAL_ADDRESS_BITS; -/// Virtual address of the bytecode region (not available in SBPFv1) +/// Virtual address of the bytecode region (in SBPFv3) pub const MM_BYTECODE_START: u64 = 0; -/// Virtual address of the readonly data region (also contains the bytecode in SBPFv1) +/// Virtual address of the readonly data region (also contains the bytecode until SBPFv3) pub const MM_RODATA_START: u64 = MM_REGION_SIZE; /// Virtual address of the stack region pub const MM_STACK_START: u64 = MM_REGION_SIZE * 2; @@ -196,9 +196,9 @@ pub const BPF_JSGT: u8 = 0x60; pub const BPF_JSGE: u8 = 0x70; /// BPF JMP operation code: syscall function call. pub const BPF_CALL: u8 = 0x80; -/// BPF JMP operation code: return from program (V1). +/// BPF JMP operation code: return from program. pub const BPF_EXIT: u8 = 0x90; -/// BPF JMP operation code: static syscall (V2). +/// BPF JMP operation code: static syscall. pub const BPF_SYSCALL: u8 = 0x90; /// BPF JMP operation code: jump if lower than. pub const BPF_JLT: u8 = 0xa0; @@ -483,11 +483,11 @@ pub const JSLE_REG: u8 = BPF_JMP | BPF_X | BPF_JSLE; pub const CALL_IMM: u8 = BPF_JMP | BPF_CALL; /// BPF opcode: tail call. pub const CALL_REG: u8 = BPF_JMP | BPF_X | BPF_CALL; -/// BPF opcode: `exit` /// `return r0`. /// Valid only for SBPFv1 +/// BPF opcode: `exit` /// `return r0`. /// Valid only until SBPFv3 pub const EXIT: u8 = BPF_JMP | BPF_EXIT; -/// BPF opcode: `return` /// `return r0`. /// Valid only for SBPFv2 +/// BPF opcode: `return` /// `return r0`. /// Valid only since SBPFv3 pub const RETURN: u8 = BPF_JMP | BPF_X | BPF_EXIT; -/// BPF opcode: `syscall` /// `syscall imm`. /// Valid only for SBPFv2 +/// BPF opcode: `syscall` /// `syscall imm`. 
/// Valid only since SBPFv3 pub const SYSCALL: u8 = BPF_JMP | BPF_SYSCALL; // Used in JIT diff --git a/src/elf.rs b/src/elf.rs index 5a347076..2489d58f 100644 --- a/src/elf.rs +++ b/src/elf.rs @@ -379,16 +379,18 @@ impl Executable { .ok_or(ElfParserError::OutOfBounds)?, ); let config = loader.get_config(); - let sbpf_version = if config.enabled_sbpf_versions.end() == &SBPFVersion::V1 { + let sbpf_version = if config.enabled_sbpf_versions.end() == &SBPFVersion::V0 { if e_flags == EF_SBPF_V2 { - SBPFVersion::V2 + SBPFVersion::Reserved } else { - SBPFVersion::V1 + SBPFVersion::V0 } } else { match e_flags { - 0 => SBPFVersion::V1, - EF_SBPF_V2 => SBPFVersion::V2, + 0 => SBPFVersion::V0, + 1 => SBPFVersion::V1, + 2 => SBPFVersion::V2, + 3 => SBPFVersion::V3, _ => SBPFVersion::Reserved, } }; @@ -621,9 +623,9 @@ impl Executable { let config = loader.get_config(); let header = elf.file_header(); let sbpf_version = if header.e_flags == EF_SBPF_V2 { - SBPFVersion::V2 + SBPFVersion::Reserved } else { - SBPFVersion::V1 + SBPFVersion::V0 }; Self::validate(config, &elf, elf_bytes.as_slice())?; @@ -748,9 +750,9 @@ impl Executable { } let sbpf_version = if header.e_flags == EF_SBPF_V2 { - SBPFVersion::V2 + SBPFVersion::Reserved } else { - SBPFVersion::V1 + SBPFVersion::V0 }; if !config.enabled_sbpf_versions.contains(&sbpf_version) { return Err(ElfError::UnsupportedSBPFVersion); @@ -1010,9 +1012,9 @@ impl Executable { let mut syscall_cache = BTreeMap::new(); let text_section = get_section(elf, b".text")?; let sbpf_version = if elf.file_header().e_flags == EF_SBPF_V2 { - SBPFVersion::V2 + SBPFVersion::Reserved } else { - SBPFVersion::V1 + SBPFVersion::V0 }; // Fixup all program counter relative call instructions @@ -1086,7 +1088,7 @@ impl Executable { .file_range() .unwrap_or_default() .contains(&r_offset) - || sbpf_version == SBPFVersion::V1 + || sbpf_version == SBPFVersion::V0 { r_offset.saturating_add(BYTE_OFFSET_IMMEDIATE) } else { @@ -1121,7 +1123,7 @@ impl Executable { 
.file_range() .unwrap_or_default() .contains(&r_offset) - || sbpf_version == SBPFVersion::V1 + || sbpf_version == SBPFVersion::V0 { let imm_low_offset = imm_offset; let imm_high_offset = imm_low_offset.saturating_add(INSN_SIZE); @@ -1227,7 +1229,7 @@ impl Executable { refd_addr.checked_shr(32).unwrap_or_default() as u32, ); } else { - let refd_addr = if sbpf_version != SBPFVersion::V1 { + let refd_addr = if sbpf_version != SBPFVersion::V0 { // We're relocating an address inside a data section (eg .rodata). The // address is encoded as a simple u64. @@ -1298,7 +1300,7 @@ impl Executable { .or_insert_with(|| ebpf::hash_symbol_name(name)); if config.reject_broken_elfs && loader - .get_function_registry(SBPFVersion::V1) + .get_function_registry(SBPFVersion::V0) .lookup_by_key(hash) .is_none() { @@ -1543,7 +1545,7 @@ mod test { #[test] fn test_validate() { - let elf_bytes = std::fs::read("tests/elfs/relative_call_sbpfv1.so").unwrap(); + let elf_bytes = std::fs::read("tests/elfs/relative_call_sbpfv0.so").unwrap(); let elf = Elf64::parse(&elf_bytes).unwrap(); let mut header = elf.file_header().clone(); @@ -1605,7 +1607,7 @@ mod test { #[test] fn test_load() { - let mut file = File::open("tests/elfs/relative_call_sbpfv1.so").expect("file open failed"); + let mut file = File::open("tests/elfs/relative_call_sbpfv0.so").expect("file open failed"); let mut elf_bytes = Vec::new(); file.read_to_end(&mut elf_bytes) .expect("failed to read elf file"); @@ -1615,7 +1617,7 @@ mod test { #[test] fn test_load_unaligned() { let mut elf_bytes = - std::fs::read("tests/elfs/relative_call_sbpfv1.so").expect("failed to read elf file"); + std::fs::read("tests/elfs/relative_call_sbpfv0.so").expect("failed to read elf file"); // The default allocator allocates aligned memory. Move the ELF slice to // elf_bytes.as_ptr() + 1 to make it unaligned and test unaligned // parsing. 
@@ -1627,7 +1629,7 @@ mod test { fn test_entrypoint() { let loader = loader(); - let mut file = File::open("tests/elfs/relative_call_sbpfv1.so").expect("file open failed"); + let mut file = File::open("tests/elfs/relative_call_sbpfv0.so").expect("file open failed"); let mut elf_bytes = Vec::new(); file.read_to_end(&mut elf_bytes) .expect("failed to read elf file"); @@ -1776,7 +1778,7 @@ mod test { assert!(matches!( ElfExecutable::parse_ro_sections( &config, - &SBPFVersion::V1, + &SBPFVersion::V0, sections, &elf_bytes, ), @@ -1803,7 +1805,7 @@ mod test { assert!(matches!( ElfExecutable::parse_ro_sections( &config, - &SBPFVersion::V1, + &SBPFVersion::V0, sections, &elf_bytes, ), @@ -1815,7 +1817,7 @@ mod test { fn test_sh_offset_not_same_as_vaddr() { let config = Config { reject_broken_elfs: true, - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; let elf_bytes = [0u8; 512]; @@ -1826,7 +1828,7 @@ mod test { let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; assert!(ElfExecutable::parse_ro_sections( &config, - &SBPFVersion::V1, + &SBPFVersion::V0, sections, &elf_bytes ) @@ -1836,7 +1838,7 @@ mod test { s1.sh_offset = 0; let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V1, sections, &elf_bytes), + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes), Err(ElfError::ValueOutOfBounds) ); } @@ -1881,7 +1883,7 @@ mod test { let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes), + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), Err(ElfError::ValueOutOfBounds) ); } @@ -1903,7 +1905,7 @@ mod test { let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = [(Some(b".text"), &s1), 
(Some(b".rodata"), &s2)]; assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes), + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), Ok(Section::Borrowed( ebpf::MM_RODATA_START as usize + 10, 100..120 @@ -1927,7 +1929,7 @@ mod test { (Some(b".rodata"), &s3), ]; let ro_section = - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V1, sections, &elf_bytes) + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes) .unwrap(); let ro_region = get_ro_region(&ro_section, &elf_bytes); let owned_section = match &ro_section { @@ -1969,7 +1971,7 @@ mod test { ]; // V2 requires optimize_rodata=true let ro_section = - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V1, sections, &elf_bytes) + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes) .unwrap(); let ro_region = get_ro_region(&ro_section, &elf_bytes); let owned_section = match &ro_section { @@ -2009,7 +2011,7 @@ mod test { (Some(b".rodata"), &s3), ]; let ro_section = - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V1, sections, &elf_bytes) + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes) .unwrap(); let owned_section = match &ro_section { Section::Owned(_offset, data) => data.as_slice(), @@ -2068,7 +2070,7 @@ mod test { assert!(matches!( ElfExecutable::parse_ro_sections( &config, - &SBPFVersion::V1, // v2 requires optimize_rodata=true + &SBPFVersion::V0, // v2 requires optimize_rodata=true sections, &elf_bytes, ), @@ -2081,8 +2083,8 @@ mod test { let config = Config::default(); let elf_bytes = [0u8; 512]; for (vaddr_base, sbpf_version) in [ - (0, SBPFVersion::V1), - (ebpf::MM_RODATA_START, SBPFVersion::V2), + (0, SBPFVersion::V0), + (ebpf::MM_RODATA_START, SBPFVersion::V3), ] { let s1 = new_section(vaddr_base, 10); let s2 = new_section(vaddr_base + 20, 10); @@ -2109,8 +2111,8 @@ mod test { let config = Config::default(); let elf_bytes 
= [0u8; 512]; for (vaddr_base, sbpf_version) in [ - (0, SBPFVersion::V1), - (ebpf::MM_RODATA_START, SBPFVersion::V2), + (0, SBPFVersion::V0), + (ebpf::MM_RODATA_START, SBPFVersion::V3), ] { let s1 = new_section(vaddr_base, 10); let s2 = new_section(vaddr_base + 10, 10); @@ -2146,8 +2148,8 @@ mod test { let config = Config::default(); let elf_bytes = [0u8; 512]; for (vaddr_base, sbpf_version) in [ - (0, SBPFVersion::V1), - (ebpf::MM_RODATA_START, SBPFVersion::V2), + (0, SBPFVersion::V0), + (ebpf::MM_RODATA_START, SBPFVersion::V3), ] { let s1 = new_section(vaddr_base, 10); let s2 = new_section(vaddr_base + 10, 10); @@ -2199,7 +2201,7 @@ mod test { #[test] fn test_reject_rodata_stack_overlap() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V3, ..Config::default() }; let elf_bytes = [0u8; 512]; @@ -2209,7 +2211,7 @@ mod test { s1.sh_offset = 0; let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; assert!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes) + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes) .is_ok() ); @@ -2218,7 +2220,7 @@ mod test { s1.sh_offset = 0; let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; assert!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes) + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes) .is_ok() ); @@ -2227,7 +2229,7 @@ mod test { s1.sh_offset = 0; let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes), + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), Err(ElfError::ValueOutOfBounds) ); @@ -2236,7 +2238,7 @@ mod test { s1.sh_offset = 0; let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; 
assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes), + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), Err(ElfError::ValueOutOfBounds) ); } @@ -2245,7 +2247,7 @@ mod test { #[should_panic(expected = r#"validation failed: WritableSectionNotSupported(".data")"#)] fn test_writable_data_section() { let elf_bytes = - std::fs::read("tests/elfs/data_section_sbpfv1.so").expect("failed to read elf file"); + std::fs::read("tests/elfs/data_section_sbpfv0.so").expect("failed to read elf file"); ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); } @@ -2253,7 +2255,7 @@ mod test { #[should_panic(expected = r#"validation failed: WritableSectionNotSupported(".bss")"#)] fn test_bss_section() { let elf_bytes = - std::fs::read("tests/elfs/bss_section_sbpfv1.so").expect("failed to read elf file"); + std::fs::read("tests/elfs/bss_section_sbpfv0.so").expect("failed to read elf file"); ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); } @@ -2269,7 +2271,7 @@ mod test { #[should_panic(expected = "validation failed: RelativeJumpOutOfBounds(8)")] fn test_relative_call_oob_backward() { let mut elf_bytes = - std::fs::read("tests/elfs/relative_call_sbpfv1.so").expect("failed to read elf file"); + std::fs::read("tests/elfs/relative_call_sbpfv0.so").expect("failed to read elf file"); LittleEndian::write_i32(&mut elf_bytes[0x1044..0x1048], -11i32); ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); } @@ -2278,7 +2280,7 @@ mod test { #[should_panic(expected = "validation failed: RelativeJumpOutOfBounds(11)")] fn test_relative_call_oob_forward() { let mut elf_bytes = - std::fs::read("tests/elfs/relative_call_sbpfv1.so").expect("failed to read elf file"); + std::fs::read("tests/elfs/relative_call_sbpfv0.so").expect("failed to read elf file"); LittleEndian::write_i32(&mut elf_bytes[0x105C..0x1060], 5); ElfExecutable::load(&elf_bytes, loader()).expect("validation 
failed"); } @@ -2288,13 +2290,13 @@ mod test { fn test_err_unresolved_syscall_reloc_64_32() { let loader = BuiltinProgram::new_loader( Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, reject_broken_elfs: true, ..Config::default() }, FunctionRegistry::default(), ); - let elf_bytes = std::fs::read("tests/elfs/syscall_reloc_64_32_sbpfv1.so") + let elf_bytes = std::fs::read("tests/elfs/syscall_reloc_64_32_sbpfv0.so") .expect("failed to read elf file"); ElfExecutable::load(&elf_bytes, Arc::new(loader)).expect("validation failed"); } diff --git a/src/interpreter.rs b/src/interpreter.rs index 981f923f..1fd1dd61 100644 --- a/src/interpreter.rs +++ b/src/interpreter.rs @@ -157,11 +157,11 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> { if self .executable .get_sbpf_version() - .implicit_sign_extension_of_results() + .explicit_sign_extension_of_results() { - value as i64 as u64 - } else { value as u32 as u64 + } else { + value as i64 as u64 } } @@ -199,7 +199,7 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> { self.vm.stack_pointer = self.vm.stack_pointer.overflowing_add(insn.imm as u64).0; } - ebpf::LD_DW_IMM if self.executable.get_sbpf_version().enable_lddw() => { + ebpf::LD_DW_IMM if !self.executable.get_sbpf_version().disable_lddw() => { ebpf::augment_lddw_unchecked(self.program, &mut insn); self.reg[dst] = insn.imm as u64; self.reg[11] += 1; @@ -292,7 +292,7 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> { ebpf::LSH32_REG => self.reg[dst] = (self.reg[dst] as u32).wrapping_shl(self.reg[src] as u32) as u64, ebpf::RSH32_IMM => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(insn.imm as u32) as u64, ebpf::RSH32_REG => self.reg[dst] = (self.reg[dst] as u32).wrapping_shr(self.reg[src] as u32) as u64, - ebpf::NEG32 if self.executable.get_sbpf_version().enable_neg() => self.reg[dst] = (self.reg[dst] as i32).wrapping_neg() as u64 & (u32::MAX as u64), + ebpf::NEG32 if 
!self.executable.get_sbpf_version().disable_neg() => self.reg[dst] = (self.reg[dst] as i32).wrapping_neg() as u64 & (u32::MAX as u64), ebpf::LD_4B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => { let vm_addr = (self.reg[src] as i64).wrapping_add(insn.off as i64) as u64; self.reg[dst] = translate_memory_access!(self, load, vm_addr, u32); @@ -309,14 +309,14 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> { ebpf::XOR32_IMM => self.reg[dst] = (self.reg[dst] as u32 ^ insn.imm as u32) as u64, ebpf::XOR32_REG => self.reg[dst] = (self.reg[dst] as u32 ^ self.reg[src] as u32) as u64, ebpf::MOV32_IMM => self.reg[dst] = insn.imm as u32 as u64, - ebpf::MOV32_REG => self.reg[dst] = if self.executable.get_sbpf_version().implicit_sign_extension_of_results() { - self.reg[src] as u32 as u64 - } else { + ebpf::MOV32_REG => self.reg[dst] = if self.executable.get_sbpf_version().explicit_sign_extension_of_results() { self.reg[src] as i32 as i64 as u64 + } else { + self.reg[src] as u32 as u64 }, ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32) as u32 as u64, ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u32 as u64, - ebpf::LE if self.executable.get_sbpf_version().enable_le() => { + ebpf::LE if !self.executable.get_sbpf_version().disable_le() => { self.reg[dst] = match insn.imm { 16 => (self.reg[dst] as u16).to_le() as u64, 32 => (self.reg[dst] as u32).to_le() as u64, @@ -381,7 +381,7 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> { let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64; translate_memory_access!(self, store, insn.imm, vm_addr, u32); }, - ebpf::NEG64 if self.executable.get_sbpf_version().enable_neg() => self.reg[dst] = (self.reg[dst] as i64).wrapping_neg() as u64, + ebpf::NEG64 if !self.executable.get_sbpf_version().disable_neg() => self.reg[dst] = (self.reg[dst] as i64).wrapping_neg() as u64, ebpf::ST_4B_REG if 
self.executable.get_sbpf_version().move_memory_instruction_classes() => { let vm_addr = (self.reg[dst] as i64).wrapping_add(insn.off as i64) as u64; translate_memory_access!(self, store, self.reg[src], vm_addr, u32); @@ -405,7 +405,7 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> { ebpf::MOV64_REG => self.reg[dst] = self.reg[src], ebpf::ARSH64_IMM => self.reg[dst] = (self.reg[dst] as i64).wrapping_shr(insn.imm as u32) as u64, ebpf::ARSH64_REG => self.reg[dst] = (self.reg[dst] as i64).wrapping_shr(self.reg[src] as u32) as u64, - ebpf::HOR64_IMM if !self.executable.get_sbpf_version().enable_lddw() => { + ebpf::HOR64_IMM if self.executable.get_sbpf_version().disable_lddw() => { self.reg[dst] |= (insn.imm as u64).wrapping_shl(32); } @@ -529,7 +529,7 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> { if let (false, Some((_, function))) = (self.executable.get_sbpf_version().static_syscalls(), self.executable.get_loader().get_function_registry(self.executable.get_sbpf_version()).lookup_by_key(insn.imm as u32)) { - // SBPFv1 syscall + // SBPFv0 syscall self.reg[0] = match self.dispatch_syscall(function) { ProgramResult::Ok(value) => *value, ProgramResult::Err(_err) => return false, @@ -554,7 +554,7 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> { } ebpf::SYSCALL if self.executable.get_sbpf_version().static_syscalls() => { if let Some((_, function)) = self.executable.get_loader().get_function_registry(self.executable.get_sbpf_version()).lookup_by_key(insn.imm as u32) { - // SBPFv2 syscall + // SBPFv3 syscall self.reg[0] = match self.dispatch_syscall(function) { ProgramResult::Ok(value) => *value, ProgramResult::Err(_err) => return false, diff --git a/src/jit.rs b/src/jit.rs index c0691196..a0afd2bb 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -342,7 +342,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { // Scan through program to find actual number of instructions let mut pc = 0; - if executable.get_sbpf_version().enable_lddw() { + if 
!executable.get_sbpf_version().disable_lddw() { while (pc + 1) * ebpf::INSN_SIZE <= program.len() { let insn = ebpf::get_insn_unchecked(program, pc); pc += match insn.opc { @@ -428,7 +428,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_PTR_TO_VM, insn.imm, Some(stack_ptr_access))); } - ebpf::LD_DW_IMM if self.executable.get_sbpf_version().enable_lddw() => { + ebpf::LD_DW_IMM if !self.executable.get_sbpf_version().disable_lddw() => { self.emit_validate_and_profile_instruction_count(false, Some(self.pc + 2)); self.pc += 1; self.result.pc_section[self.pc] = self.anchors[ANCHOR_CALL_UNSUPPORTED_INSTRUCTION] as usize; @@ -485,13 +485,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { // BPF_ALU32_LOAD class ebpf::ADD32_IMM => { self.emit_sanitized_alu(OperandSize::S32, 0x01, 0, dst, insn.imm); - if self.executable.get_sbpf_version().implicit_sign_extension_of_results() { + if !self.executable.get_sbpf_version().explicit_sign_extension_of_results() { self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 } }, ebpf::ADD32_REG => { self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x01, src, dst, 0, None)); - if self.executable.get_sbpf_version().implicit_sign_extension_of_results() { + if !self.executable.get_sbpf_version().explicit_sign_extension_of_results() { self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 } }, @@ -504,13 +504,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } else { self.emit_sanitized_alu(OperandSize::S32, 0x29, 5, dst, insn.imm); } - if self.executable.get_sbpf_version().implicit_sign_extension_of_results() { + if !self.executable.get_sbpf_version().explicit_sign_extension_of_results() { self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 } }, ebpf::SUB32_REG => { 
self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x29, src, dst, 0, None)); - if self.executable.get_sbpf_version().implicit_sign_extension_of_results() { + if !self.executable.get_sbpf_version().explicit_sign_extension_of_results() { self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 } }, @@ -532,7 +532,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { ebpf::LSH32_REG => self.emit_shift(OperandSize::S32, 4, src, dst, None), ebpf::RSH32_IMM => self.emit_shift(OperandSize::S32, 5, REGISTER_SCRATCH, dst, Some(insn.imm)), ebpf::RSH32_REG => self.emit_shift(OperandSize::S32, 5, src, dst, None), - ebpf::NEG32 if self.executable.get_sbpf_version().enable_neg() => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0xf7, 3, dst, 0, None)), + ebpf::NEG32 if !self.executable.get_sbpf_version().disable_neg() => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0xf7, 3, dst, 0, None)), ebpf::LD_4B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => { self.emit_address_translation(Some(dst), Value::RegisterPlusConstant64(src, insn.off as i64, true), 4, None); }, @@ -549,15 +549,15 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } } ebpf::MOV32_REG => { - if self.executable.get_sbpf_version().implicit_sign_extension_of_results() { - self.emit_ins(X86Instruction::mov(OperandSize::S32, src, dst)); - } else { + if self.executable.get_sbpf_version().explicit_sign_extension_of_results() { self.emit_ins(X86Instruction::mov_with_sign_extension(OperandSize::S64, src, dst)); + } else { + self.emit_ins(X86Instruction::mov(OperandSize::S32, src, dst)); } } ebpf::ARSH32_IMM => self.emit_shift(OperandSize::S32, 7, REGISTER_SCRATCH, dst, Some(insn.imm)), ebpf::ARSH32_REG => self.emit_shift(OperandSize::S32, 7, src, dst, None), - ebpf::LE if self.executable.get_sbpf_version().enable_le() => { + ebpf::LE if !self.executable.get_sbpf_version().disable_le() => { match insn.imm { 16 => { 
self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x81, 4, dst, 0xffff, None)); // Mask to 16 bit @@ -626,7 +626,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { ebpf::ST_4B_IMM if self.executable.get_sbpf_version().move_memory_instruction_classes() => { self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 4, Some(Value::Constant64(insn.imm, true))); }, - ebpf::NEG64 if self.executable.get_sbpf_version().enable_neg() => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, dst, 0, None)), + ebpf::NEG64 if !self.executable.get_sbpf_version().disable_neg() => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, dst, 0, None)), ebpf::ST_4B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => { self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 4, Some(Value::Register(src))); }, @@ -648,7 +648,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { ebpf::MOV64_REG => self.emit_ins(X86Instruction::mov(OperandSize::S64, src, dst)), ebpf::ARSH64_IMM => self.emit_shift(OperandSize::S64, 7, REGISTER_SCRATCH, dst, Some(insn.imm)), ebpf::ARSH64_REG => self.emit_shift(OperandSize::S64, 7, src, dst, None), - ebpf::HOR64_IMM if !self.executable.get_sbpf_version().enable_lddw() => { + ebpf::HOR64_IMM if self.executable.get_sbpf_version().disable_lddw() => { self.emit_sanitized_alu(OperandSize::S64, 0x09, 1, dst, (insn.imm as u64).wrapping_shl(32) as i64); } @@ -712,7 +712,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { if let (false, Some((_, function))) = (self.executable.get_sbpf_version().static_syscalls(), self.executable.get_loader().get_function_registry(self.executable.get_sbpf_version()).lookup_by_key(insn.imm as u32)) { - // SBPFv1 syscall + // SBPFv0 syscall self.emit_syscall_dispatch(function); } else if let Some((_function_name, target_pc)) = self.executable @@ -1374,7 +1374,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { 
self.emit_ins(X86Instruction::pop(RAX)); } if let OperandSize::S32 = size { - if signed && self.executable.get_sbpf_version().implicit_sign_extension_of_results() { + if signed && !self.executable.get_sbpf_version().explicit_sign_extension_of_results() { self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 } } @@ -1799,8 +1799,8 @@ mod tests { prog[pc * ebpf::INSN_SIZE] = ebpf::ADD64_IMM; } - let mut empty_program_machine_code_length_per_version = [0; 2]; - for sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + let mut empty_program_machine_code_length_per_version = [0; 4]; + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let empty_program_machine_code_length = { let config = Config { noop_instruction_rate: 0, @@ -1827,7 +1827,7 @@ mod tests { let config = Config { instruction_meter_checkpoint_distance: index * INSTRUCTION_COUNT * 2, noop_instruction_rate: 0, - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; let mut executable = create_mockup_executable(config, &prog); @@ -1847,7 +1847,7 @@ mod tests { MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT ); - for sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let empty_program_machine_code_length = empty_program_machine_code_length_per_version[sbpf_version as usize]; diff --git a/src/memory_region.rs b/src/memory_region.rs index e0ebfe61..de5ffe54 100644 --- a/src/memory_region.rs +++ b/src/memory_region.rs @@ -1026,13 +1026,13 @@ mod test { #[test] fn test_map_empty() { let config = Config::default(); - let m = UnalignedMemoryMapping::new(vec![], &config, SBPFVersion::V2).unwrap(); + let m = UnalignedMemoryMapping::new(vec![], &config, SBPFVersion::V3).unwrap(); assert_error!( m.map(AccessType::Load, ebpf::MM_INPUT_START, 8), "AccessViolation" ); - let m = AlignedMemoryMapping::new(vec![], &config, 
SBPFVersion::V2).unwrap(); + let m = AlignedMemoryMapping::new(vec![], &config, SBPFVersion::V3).unwrap(); assert_error!( m.map(AccessType::Load, ebpf::MM_INPUT_START, 8), "AccessViolation" @@ -1053,7 +1053,7 @@ mod test { MemoryRegion::new_writable_gapped(&mut mem1, ebpf::MM_STACK_START, 2), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); for frame in 0..4 { @@ -1081,7 +1081,7 @@ mod test { MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64 - 1), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ), "InvalidMemoryRegion(1)" ); @@ -1091,7 +1091,7 @@ mod test { MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .is_ok()); } @@ -1117,7 +1117,7 @@ mod test { ), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1191,7 +1191,7 @@ mod test { MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + 4), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); assert_error!( @@ -1251,7 +1251,7 @@ mod test { MemoryRegion::new_readonly(&mem2, ebpf::MM_STACK_START), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); assert_error!( @@ -1325,7 +1325,7 @@ mod test { ), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1367,7 +1367,7 @@ mod test { ), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); m.store(0x1122u16, ebpf::MM_INPUT_START).unwrap(); @@ -1394,7 +1394,7 @@ mod test { let m = MemoryMapping::new( vec![MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START)], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1428,7 +1428,7 @@ mod test { MemoryRegion::new_writable(&mut mem2, ebpf::MM_INPUT_START + 7), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1456,7 +1456,7 @@ mod test { let m = MemoryMapping::new( vec![MemoryRegion::new_writable(&mut mem1, ebpf::MM_INPUT_START)], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); m.store(0x11u8, 
ebpf::MM_INPUT_START).unwrap(); @@ -1474,7 +1474,7 @@ mod test { MemoryRegion::new_writable(&mut mem2, ebpf::MM_INPUT_START + 4), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); m.store(0x1122334455667788u64, ebpf::MM_INPUT_START) @@ -1500,7 +1500,7 @@ mod test { let m = MemoryMapping::new( vec![MemoryRegion::new_readonly(&mem1, ebpf::MM_INPUT_START)], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); assert_eq!(m.load::(ebpf::MM_INPUT_START).unwrap(), 0xff); @@ -1516,7 +1516,7 @@ mod test { MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + 4), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); assert_eq!( @@ -1541,7 +1541,7 @@ mod test { MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); m.store(0x11223344, ebpf::MM_INPUT_START).unwrap(); @@ -1559,7 +1559,7 @@ mod test { MemoryRegion::new_readonly(&mem2, ebpf::MM_INPUT_START + mem1.len() as u64), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1631,7 +1631,7 @@ mod test { MemoryRegion::new_readonly(&mem2, ebpf::MM_STACK_START), ], &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1688,7 +1688,7 @@ mod test { Ok(c.borrow().as_slice().as_ptr() as u64) }), &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1721,7 +1721,7 @@ mod test { Ok(c.borrow().as_slice().as_ptr() as u64) }), &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1766,7 +1766,7 @@ mod test { Ok(c.borrow().as_slice().as_ptr() as u64) }), &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1786,7 +1786,7 @@ mod test { vec![MemoryRegion::new_cow(&original, ebpf::MM_RODATA_START, 42)], Box::new(|_| Err(())), &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1803,7 +1803,7 @@ mod test { vec![MemoryRegion::new_cow(&original, ebpf::MM_RODATA_START, 42)], Box::new(|_| Err(())), &config, - SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); diff 
--git a/src/program.rs b/src/program.rs index 35f7ed49..fe019dd1 100644 --- a/src/program.rs +++ b/src/program.rs @@ -12,73 +12,80 @@ use { #[derive(Debug, PartialEq, PartialOrd, Eq, Clone, Copy)] pub enum SBPFVersion { /// The legacy format + V0, + /// SIMD-0166 V1, - /// The current format + /// SIMD-0174, SIMD-0173 V2, + /// SIMD-0178, SIMD-0179, SIMD-0189 + V3, /// Used for future versions Reserved, } impl SBPFVersion { - /// Implicitly perform sign extension of results - pub fn implicit_sign_extension_of_results(self) -> bool { - self == SBPFVersion::V1 + /// Enable SIMD-0166: SBPF dynamic stack frames + pub fn dynamic_stack_frames(self) -> bool { + self >= SBPFVersion::V1 } - /// Enable the little-endian byte swap instructions - pub fn enable_le(self) -> bool { - self == SBPFVersion::V1 + /// Enable SIMD-0174: SBPF arithmetics improvements + pub fn enable_pqr(self) -> bool { + self >= SBPFVersion::V2 } - - /// Enable the negation instruction - pub fn enable_neg(self) -> bool { - self == SBPFVersion::V1 + /// ... SIMD-0174 + pub fn explicit_sign_extension_of_results(self) -> bool { + self >= SBPFVersion::V2 } - - /// Swaps the reg and imm operands of the subtraction instruction + /// ... SIMD-0174 pub fn swap_sub_reg_imm_operands(self) -> bool { - self != SBPFVersion::V1 + self >= SBPFVersion::V2 } - - /// Enable the only two slots long instruction: LD_DW_IMM - pub fn enable_lddw(self) -> bool { - self == SBPFVersion::V1 + /// ... SIMD-0174 + pub fn disable_neg(self) -> bool { + self >= SBPFVersion::V2 } - /// Enable the BPF_PQR instruction class - pub fn enable_pqr(self) -> bool { - self != SBPFVersion::V1 + /// Enable SIMD-0173: SBPF instruction encoding improvements + pub fn callx_uses_src_reg(self) -> bool { + self >= SBPFVersion::V2 + } + /// ... SIMD-0173 + pub fn disable_lddw(self) -> bool { + self >= SBPFVersion::V2 + } + /// ... SIMD-0173 + pub fn disable_le(self) -> bool { + self >= SBPFVersion::V2 + } + /// ... 
SIMD-0173 + pub fn move_memory_instruction_classes(self) -> bool { + self >= SBPFVersion::V2 } - /// Use src reg instead of imm in callx - pub fn callx_uses_src_reg(self) -> bool { - self != SBPFVersion::V1 + /// Enable SIMD-0178: SBPF Static Syscalls + /// Enable SIMD-0179: SBPF stricter verification constraints + pub fn static_syscalls(self) -> bool { + self >= SBPFVersion::V3 + } + /// Enable SIMD-0189: SBPF stricter ELF headers + pub fn enable_stricter_elf_headers(self) -> bool { + self >= SBPFVersion::V3 + } + /// ... SIMD-0189 + pub fn enable_lower_bytecode_vaddr(self) -> bool { + self >= SBPFVersion::V3 } /// Ensure that rodata sections don't exceed their maximum allowed size and /// overlap with the stack pub fn reject_rodata_stack_overlap(self) -> bool { - self != SBPFVersion::V1 + self != SBPFVersion::V0 } /// Allow sh_addr != sh_offset in elf sections. pub fn enable_elf_vaddr(self) -> bool { - self != SBPFVersion::V1 - } - - /// Separates the bytecode from the read only data in virtual address space - pub fn enable_lower_bytecode_vaddr(self) -> bool { - self != SBPFVersion::V1 - } - - /// Use dynamic stack frame sizes - pub fn dynamic_stack_frames(self) -> bool { - self != SBPFVersion::V1 - } - - /// Support syscalls via pseudo calls (insn.src = 0) - pub fn static_syscalls(self) -> bool { - self != SBPFVersion::V1 + self != SBPFVersion::V0 } /// Calculate the target program counter for a CALL_IMM instruction depending on @@ -90,16 +97,6 @@ impl SBPFVersion { imm as u32 } } - - /// Move opcodes of memory instructions into ALU instruction classes - pub fn move_memory_instruction_classes(self) -> bool { - self != SBPFVersion::V1 - } - - /// Constrain ELF format to ignore section headers and relocations - pub fn enable_stricter_elf_headers(self) -> bool { - self != SBPFVersion::V1 - } } /// Holds the function symbols of an Executable @@ -149,7 +146,7 @@ impl FunctionRegistry { Ok(key) } - /// Used for transitioning from SBPFv1 to SBPFv2 + /// Used for 
transitioning from SBPFv0 to SBPFv3 pub(crate) fn register_function_hashed_legacy( &mut self, loader: &BuiltinProgram, @@ -169,7 +166,7 @@ impl FunctionRegistry { ebpf::hash_symbol_name(&usize::from(value).to_le_bytes()) }; if loader - .get_function_registry(SBPFVersion::V1) + .get_function_registry(SBPFVersion::V0) .lookup_by_key(hash) .is_some() { diff --git a/src/verifier.rs b/src/verifier.rs index 70cb0de4..e676ef91 100644 --- a/src/verifier.rs +++ b/src/verifier.rs @@ -248,7 +248,7 @@ impl Verifier for RequisiteVerifier { } match insn.opc { - ebpf::LD_DW_IMM if sbpf_version.enable_lddw() => { + ebpf::LD_DW_IMM if !sbpf_version.disable_lddw() => { check_load_dw(prog, insn_ptr)?; insn_ptr += 1; }, @@ -290,7 +290,7 @@ impl Verifier for RequisiteVerifier { ebpf::LSH32_REG => {}, ebpf::RSH32_IMM => { check_imm_shift(&insn, insn_ptr, 32)?; }, ebpf::RSH32_REG => {}, - ebpf::NEG32 if sbpf_version.enable_neg() => {}, + ebpf::NEG32 if !sbpf_version.disable_neg() => {}, ebpf::LD_4B_REG if sbpf_version.move_memory_instruction_classes() => {}, ebpf::MOD32_IMM if !sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, ebpf::MOD32_REG if !sbpf_version.enable_pqr() => {}, @@ -301,7 +301,7 @@ impl Verifier for RequisiteVerifier { ebpf::MOV32_REG => {}, ebpf::ARSH32_IMM => { check_imm_shift(&insn, insn_ptr, 32)?; }, ebpf::ARSH32_REG => {}, - ebpf::LE if sbpf_version.enable_le() => { check_imm_endian(&insn, insn_ptr)?; }, + ebpf::LE if !sbpf_version.disable_le() => { check_imm_endian(&insn, insn_ptr)?; }, ebpf::BE => { check_imm_endian(&insn, insn_ptr)?; }, // BPF_ALU64_STORE class @@ -326,7 +326,7 @@ impl Verifier for RequisiteVerifier { ebpf::RSH64_IMM => { check_imm_shift(&insn, insn_ptr, 64)?; }, ebpf::RSH64_REG => {}, ebpf::ST_4B_IMM if sbpf_version.move_memory_instruction_classes() => store = true, - ebpf::NEG64 if sbpf_version.enable_neg() => {}, + ebpf::NEG64 if !sbpf_version.disable_neg() => {}, ebpf::ST_4B_REG if 
sbpf_version.move_memory_instruction_classes() => store = true, ebpf::MOD64_IMM if !sbpf_version.enable_pqr() => { check_imm_nonzero(&insn, insn_ptr)?; }, ebpf::ST_8B_IMM if sbpf_version.move_memory_instruction_classes() => store = true, @@ -338,7 +338,7 @@ impl Verifier for RequisiteVerifier { ebpf::MOV64_REG => {}, ebpf::ARSH64_IMM => { check_imm_shift(&insn, insn_ptr, 64)?; }, ebpf::ARSH64_REG => {}, - ebpf::HOR64_IMM if !sbpf_version.enable_lddw() => {}, + ebpf::HOR64_IMM if sbpf_version.disable_lddw() => {}, // BPF_PQR class ebpf::LMUL32_IMM if sbpf_version.enable_pqr() => {}, diff --git a/src/vm.rs b/src/vm.rs index 690be74c..18ad7d5d 100644 --- a/src/vm.rs +++ b/src/vm.rs @@ -103,7 +103,7 @@ impl Default for Config { sanitize_user_provided_values: true, optimize_rodata: true, aligned_memory_mapping: true, - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V3, } } } @@ -251,7 +251,7 @@ pub struct CallFrame { /// /// let loader = std::sync::Arc::new(BuiltinProgram::new_mock()); /// let function_registry = FunctionRegistry::default(); -/// let mut executable = Executable::::from_text_bytes(prog, loader.clone(), SBPFVersion::V2, function_registry).unwrap(); +/// let mut executable = Executable::::from_text_bytes(prog, loader.clone(), SBPFVersion::V3, function_registry).unwrap(); /// executable.verify::().unwrap(); /// let mut context_object = TestContextObject::new(1); /// let sbpf_version = executable.get_sbpf_version(); diff --git a/tests/assembler.rs b/tests/assembler.rs index 45ce9931..2ce72f09 100644 --- a/tests/assembler.rs +++ b/tests/assembler.rs @@ -58,7 +58,7 @@ fn test_fill() { #[test] fn test_exit() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; assert_eq!( @@ -74,7 +74,7 @@ fn test_exit() { #[test] fn test_static_syscall() { let config = Config { - 
enabled_sbpf_versions: SBPFVersion::V2..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V3..=SBPFVersion::V3, ..Config::default() }; @@ -87,7 +87,7 @@ fn test_static_syscall() { #[test] fn test_return() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V2..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V3..=SBPFVersion::V3, ..Config::default() }; assert_eq!( @@ -519,7 +519,7 @@ fn test_large_immediate() { #[test] fn test_tcp_sack() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V2..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V3..=SBPFVersion::V3, ..Config::default() }; let executable = assemble::( diff --git a/tests/disassembler.rs b/tests/disassembler.rs index e77d274f..cc55af86 100644 --- a/tests/disassembler.rs +++ b/tests/disassembler.rs @@ -45,7 +45,7 @@ fn test_empty() { #[test] fn test_exit() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; disasm!("entrypoint:\n exit\n", config); @@ -54,7 +54,7 @@ fn test_exit() { #[test] fn test_return() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V2..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V3..=SBPFVersion::V3, ..Config::default() }; disasm!("entrypoint:\n return\n", config); @@ -63,7 +63,7 @@ fn test_return() { #[test] fn test_static_syscall() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V2..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V3..=SBPFVersion::V3, ..Config::default() }; disasm!("entrypoint:\n syscall 5\n", config); diff --git a/tests/elfs/bss_section_sbpfv1.so b/tests/elfs/bss_section_sbpfv0.so similarity index 100% rename from tests/elfs/bss_section_sbpfv1.so rename to tests/elfs/bss_section_sbpfv0.so diff --git a/tests/elfs/data_section_sbpfv1.so b/tests/elfs/data_section_sbpfv0.so similarity index 100% rename from tests/elfs/data_section_sbpfv1.so rename to 
tests/elfs/data_section_sbpfv0.so diff --git a/tests/elfs/elfs.sh b/tests/elfs/elfs.sh index f8cfc974..93190d97 100755 --- a/tests/elfs/elfs.sh +++ b/tests/elfs/elfs.sh @@ -9,40 +9,40 @@ RC="$RC_COMMON -C target_cpu=sbfv2" RC_V1="$RC_COMMON -C target_cpu=generic" LD_COMMON="$TOOLCHAIN/llvm/bin/ld.lld -z notext -shared --Bdynamic -entry entrypoint" LD="$LD_COMMON --script elf.ld" -LD_V1="$LD_COMMON --script elf_sbpfv1.ld" +LD_V1="$LD_COMMON --script elf_sbpfv0.ld" $RC -o strict_header.o strict_header.rs $LD -o strict_header.so strict_header.o $RC_V1 -o relative_call.o relative_call.rs -$LD_V1 -o relative_call_sbpfv1.so relative_call.o +$LD_V1 -o relative_call_sbpfv0.so relative_call.o $RC_V1 -o syscall_reloc_64_32.o syscall_reloc_64_32.rs -$LD_V1 -o syscall_reloc_64_32_sbpfv1.so syscall_reloc_64_32.o +$LD_V1 -o syscall_reloc_64_32_sbpfv0.so syscall_reloc_64_32.o $RC_V1 -o bss_section.o bss_section.rs -$LD_V1 -o bss_section_sbpfv1.so bss_section.o +$LD_V1 -o bss_section_sbpfv0.so bss_section.o $RC_V1 -o data_section.o data_section.rs -$LD_V1 -o data_section_sbpfv1.so data_section.o +$LD_V1 -o data_section_sbpfv0.so data_section.o $RC_V1 -o rodata_section.o rodata_section.rs -$LD_V1 -o rodata_section_sbpfv1.so rodata_section.o +$LD_V1 -o rodata_section_sbpfv0.so rodata_section.o $RC -o program_headers_overflow.o rodata_section.rs "$TOOLCHAIN"/llvm/bin/ld.lld -z notext -shared --Bdynamic -entry entrypoint --script program_headers_overflow.ld --noinhibit-exec -o program_headers_overflow.so program_headers_overflow.o $RC_V1 -o struct_func_pointer.o struct_func_pointer.rs -$LD_V1 -o struct_func_pointer_sbpfv1.so struct_func_pointer.o +$LD_V1 -o struct_func_pointer_sbpfv0.so struct_func_pointer.o $RC_V1 -o reloc_64_64.o reloc_64_64.rs -$LD_V1 -o reloc_64_64_sbpfv1.so reloc_64_64.o +$LD_V1 -o reloc_64_64_sbpfv0.so reloc_64_64.o $RC_V1 -o reloc_64_relative.o reloc_64_relative.rs -$LD_V1 -o reloc_64_relative_sbpfv1.so reloc_64_relative.o +$LD_V1 -o reloc_64_relative_sbpfv0.so 
reloc_64_relative.o $RC_V1 -o reloc_64_relative_data.o reloc_64_relative_data.rs -$LD_V1 -o reloc_64_relative_data_sbpfv1.so reloc_64_relative_data.o +$LD_V1 -o reloc_64_relative_data_sbpfv0.so reloc_64_relative_data.o # $RC_V1 -o callx_unaligned.o callx_unaligned.rs # $LD_V1 -o callx_unaligned.so callx_unaligned.o diff --git a/tests/elfs/relative_call_sbpfv1.so b/tests/elfs/relative_call_sbpfv0.so similarity index 100% rename from tests/elfs/relative_call_sbpfv1.so rename to tests/elfs/relative_call_sbpfv0.so diff --git a/tests/elfs/reloc_64_64_sbpfv1.so b/tests/elfs/reloc_64_64_sbpfv0.so similarity index 100% rename from tests/elfs/reloc_64_64_sbpfv1.so rename to tests/elfs/reloc_64_64_sbpfv0.so diff --git a/tests/elfs/reloc_64_relative_data_sbpfv1.so b/tests/elfs/reloc_64_relative_data_sbpfv0.so similarity index 100% rename from tests/elfs/reloc_64_relative_data_sbpfv1.so rename to tests/elfs/reloc_64_relative_data_sbpfv0.so diff --git a/tests/elfs/reloc_64_relative_sbpfv1.so b/tests/elfs/reloc_64_relative_sbpfv0.so similarity index 100% rename from tests/elfs/reloc_64_relative_sbpfv1.so rename to tests/elfs/reloc_64_relative_sbpfv0.so diff --git a/tests/elfs/rodata_section_sbpfv1.so b/tests/elfs/rodata_section_sbpfv0.so similarity index 100% rename from tests/elfs/rodata_section_sbpfv1.so rename to tests/elfs/rodata_section_sbpfv0.so diff --git a/tests/elfs/strict_header.so b/tests/elfs/strict_header.so index 5bea25c4..fac0efc9 100644 Binary files a/tests/elfs/strict_header.so and b/tests/elfs/strict_header.so differ diff --git a/tests/elfs/struct_func_pointer_sbpfv1.so b/tests/elfs/struct_func_pointer_sbpfv0.so similarity index 100% rename from tests/elfs/struct_func_pointer_sbpfv1.so rename to tests/elfs/struct_func_pointer_sbpfv0.so diff --git a/tests/elfs/syscall_reloc_64_32_sbpfv1.so b/tests/elfs/syscall_reloc_64_32_sbpfv0.so similarity index 100% rename from tests/elfs/syscall_reloc_64_32_sbpfv1.so rename to tests/elfs/syscall_reloc_64_32_sbpfv0.so diff 
--git a/tests/execution.rs b/tests/execution.rs index 8b617501..b7614ca4 100644 --- a/tests/execution.rs +++ b/tests/execution.rs @@ -196,9 +196,9 @@ macro_rules! test_syscall_asm { enable_instruction_tracing: true, ..Config::default() }; - for sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { config.enabled_sbpf_versions = sbpf_version..=sbpf_version; - let src = if sbpf_version == SBPFVersion::V1 { + let src = if sbpf_version == SBPFVersion::V0 { format!($source, $($syscall_name, )*) } else { format!($source, $($syscall_number, )*) @@ -830,7 +830,7 @@ fn test_pqr() { let mut executable = Executable::::from_text_bytes( &prog, loader.clone(), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); @@ -845,7 +845,7 @@ fn test_pqr() { let mut executable = Executable::::from_text_bytes( &prog, loader.clone(), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); @@ -879,7 +879,7 @@ fn test_err_divide_by_zero() { let mut executable = Executable::::from_text_bytes( &prog, loader.clone(), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); @@ -921,7 +921,7 @@ fn test_err_divide_overflow() { let mut executable = Executable::::from_text_bytes( &prog, loader.clone(), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); @@ -938,7 +938,7 @@ fn test_err_divide_overflow() { #[test] fn test_memory_instructions() { - for sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let config = Config { enabled_sbpf_versions: sbpf_version..=sbpf_version, ..Config::default() @@ -1434,7 +1434,7 @@ fn test_stxb_chain() { #[test] fn test_exit_capped() { - for sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let config = Config { enabled_sbpf_versions: sbpf_version..=sbpf_version, ..Config::default() @@ 
-1453,7 +1453,7 @@ fn test_exit_capped() { #[test] fn test_exit_without_value() { - for sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let config = Config { enabled_sbpf_versions: sbpf_version..=sbpf_version, ..Config::default() @@ -1472,7 +1472,7 @@ fn test_exit_without_value() { #[test] fn test_exit() { - for sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let config = Config { enabled_sbpf_versions: sbpf_version..=sbpf_version, ..Config::default() @@ -1492,7 +1492,7 @@ fn test_exit() { #[test] fn test_early_exit() { - for sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let config = Config { enabled_sbpf_versions: sbpf_version..=sbpf_version, ..Config::default() @@ -2042,7 +2042,7 @@ fn test_string_stack() { #[test] fn test_err_dynamic_stack_out_of_bound() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V3, max_call_depth: 3, ..Config::default() }; @@ -2176,9 +2176,9 @@ fn test_entrypoint_exit() { // can't infer anything from the stack size so we track call depth // explicitly. Make sure exit still works with both fixed and dynamic // frames. 
- for highest_sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for highest_sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=highest_sbpf_version, + enabled_sbpf_versions: SBPFVersion::V0..=highest_sbpf_version, ..Config::default() }; @@ -2203,9 +2203,9 @@ fn test_entrypoint_exit() { #[test] fn test_stack_call_depth_tracking() { - for highest_sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for highest_sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=highest_sbpf_version, + enabled_sbpf_versions: SBPFVersion::V0..=highest_sbpf_version, max_call_depth: 2, ..Config::default() }; @@ -2264,7 +2264,7 @@ fn test_err_mem_access_out_of_bound() { let mut executable = Executable::::from_text_bytes( &prog, loader.clone(), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); @@ -2287,11 +2287,11 @@ fn test_err_mem_access_out_of_bound() { #[test] fn test_relative_call() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_elf!( - "tests/elfs/relative_call_sbpfv1.so", + "tests/elfs/relative_call_sbpfv0.so", config, [1], (), @@ -2365,7 +2365,7 @@ fn test_callx() { #[test] fn test_err_callx_unregistered() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; @@ -2385,7 +2385,7 @@ fn test_err_callx_unregistered() { ); let config = Config { - enabled_sbpf_versions: SBPFVersion::V2..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V3..=SBPFVersion::V3, ..Config::default() }; @@ -2404,7 +2404,7 @@ fn test_err_callx_unregistered() { ProgramResult::Err(EbpfError::UnsupportedInstruction), ); - let versions = [SBPFVersion::V1, SBPFVersion::V2]; + let versions = 
[SBPFVersion::V0, SBPFVersion::V3]; let expected_errors = [ EbpfError::CallOutsideTextSegment, EbpfError::UnsupportedInstruction, @@ -2437,7 +2437,7 @@ fn test_err_callx_unregistered() { #[test] fn test_err_callx_oob_low() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -2691,10 +2691,10 @@ declare_builtin_function!( if depth > 0 { let mut config = Config::default(); let syscall_name = if version == 1 { - config.enabled_sbpf_versions = SBPFVersion::V1..=SBPFVersion::V1; + config.enabled_sbpf_versions = SBPFVersion::V0..=SBPFVersion::V0; "nested_vm_syscall" } else { - config.enabled_sbpf_versions = SBPFVersion::V2..=SBPFVersion::V2; + config.enabled_sbpf_versions = SBPFVersion::V3..=SBPFVersion::V3; "1" }; let mut loader = BuiltinProgram::new_loader_with_dense_registration(config); @@ -2728,15 +2728,15 @@ declare_builtin_function!( fn test_nested_vm_syscall() { let config = Config::default(); let mut context_object = TestContextObject::default(); - let mut memory_mapping = MemoryMapping::new(vec![], &config, SBPFVersion::V2).unwrap(); + let mut memory_mapping = MemoryMapping::new(vec![], &config, SBPFVersion::V3).unwrap(); - // SBPFv1 + // SBPFv0 let result = SyscallNestedVm::rust(&mut context_object, 1, 0, 1, 0, 0, &mut memory_mapping); assert_eq!(result.unwrap(), 42); let result = SyscallNestedVm::rust(&mut context_object, 1, 1, 1, 0, 0, &mut memory_mapping); assert_error!(result, "CallDepthExceeded"); - // SBPFv2 + // SBPFv3 let result = SyscallNestedVm::rust(&mut context_object, 1, 0, 2, 0, 0, &mut memory_mapping); assert_eq!(result.unwrap(), 42); let result = SyscallNestedVm::rust(&mut context_object, 1, 1, 2, 0, 0, &mut memory_mapping); @@ -2962,7 +2962,7 @@ fn test_far_jumps() { #[test] fn test_err_call_unresolved() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + 
enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -2985,11 +2985,11 @@ fn test_err_call_unresolved() { #[test] fn test_syscall_reloc_64_32() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_elf!( - "tests/elfs/syscall_reloc_64_32_sbpfv1.so", + "tests/elfs/syscall_reloc_64_32_sbpfv0.so", config, [], ( @@ -3001,12 +3001,12 @@ fn test_syscall_reloc_64_32() { } #[test] -fn test_reloc_64_64_sbpfv1() { +fn test_reloc_64_64_sbpfv0() { // Tests the correctness of R_BPF_64_64 relocations. The program returns the // address of the entrypoint. // [ 1] .text PROGBITS 0000000000000120 000120 000018 00 AX 0 0 8 test_interpreter_and_jit_elf!( - "tests/elfs/reloc_64_64_sbpfv1.so", + "tests/elfs/reloc_64_64_sbpfv0.so", [], (), TestContextObject::new(2), @@ -3015,17 +3015,17 @@ fn test_reloc_64_64_sbpfv1() { } #[test] -fn test_reloc_64_relative_sbpfv1() { +fn test_reloc_64_relative_sbpfv0() { // Tests the correctness of R_BPF_64_RELATIVE relocations. The program // returns the address of the first .rodata byte. 
// [ 1] .text PROGBITS 0000000000000120 000120 000018 00 AX 0 0 8 // [ 2] .rodata PROGBITS 0000000000000138 000138 00000a 01 AMS 0 0 1 let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_elf!( - "tests/elfs/reloc_64_relative_sbpfv1.so", + "tests/elfs/reloc_64_relative_sbpfv0.so", config, [], (), @@ -3044,11 +3044,11 @@ fn test_reloc_64_relative_data_sbfv1() { // 00000000000001f8 : // 63: 08 01 00 00 00 00 00 00 let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_elf!( - "tests/elfs/reloc_64_relative_data_sbpfv1.so", + "tests/elfs/reloc_64_relative_data_sbpfv0.so", config, [], (), @@ -3058,13 +3058,13 @@ fn test_reloc_64_relative_data_sbfv1() { } #[test] -fn test_reloc_64_relative_data_sbpfv1() { +fn test_reloc_64_relative_data_sbpfv0() { // Before https://github.com/solana-labs/llvm-project/pull/35, we used to // generate invalid R_BPF_64_RELATIVE relocations in sections other than // .text. // // This test checks that the old behaviour is maintained for backwards - // compatibility when dealing with non-sbfv2 files. See also Elf::relocate(). + // compatibility when dealing with non-sbpfv3 files. See also Elf::relocate(). // // The program returns the address of the first .rodata byte. 
// [ 1] .text PROGBITS 00000000000000e8 0000e8 000020 00 AX 0 0 8 @@ -3073,11 +3073,11 @@ fn test_reloc_64_relative_data_sbpfv1() { // 00000000000001f8 : // 63: 00 00 00 00 08 01 00 00 let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_elf!( - "tests/elfs/reloc_64_relative_data_sbpfv1.so", + "tests/elfs/reloc_64_relative_data_sbpfv0.so", config, [], (), @@ -3087,14 +3087,14 @@ fn test_reloc_64_relative_data_sbpfv1() { } #[test] -fn test_load_elf_rodata_sbpfv1() { +fn test_load_elf_rodata_sbpfv0() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, optimize_rodata: false, ..Config::default() }; test_interpreter_and_jit_elf!( - "tests/elfs/rodata_section_sbpfv1.so", + "tests/elfs/rodata_section_sbpfv0.so", config, [], (), @@ -3109,11 +3109,11 @@ fn test_struct_func_pointer() { // which is a relocatable function pointer is not overwritten when // the function pointer is relocated at load time. let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_elf!( - "tests/elfs/struct_func_pointer_sbpfv1.so", + "tests/elfs/struct_func_pointer_sbpfv0.so", config, [], (), @@ -3347,7 +3347,7 @@ fn execute_generated_program(prog: &[u8]) -> bool { }, FunctionRegistry::default(), )), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ); let mut executable = if let Ok(executable) = executable { @@ -3447,14 +3447,14 @@ fn test_total_chaos() { #[test] fn test_invalid_call_imm() { - // In SBPFv2, `call_imm` N shall not be dispatched a syscall. + // In SBPFv3, `call_imm` N shall not be dispatched a syscall. 
let prog = &[ 0x85, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // call_imm 2 0x9d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]; let config = Config { - enabled_sbpf_versions: SBPFVersion::V2..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V3..=SBPFVersion::V3, enable_instruction_tracing: true, ..Config::default() }; @@ -3465,7 +3465,7 @@ fn test_invalid_call_imm() { let mut executable = Executable::::from_text_bytes( prog, Arc::new(loader), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); @@ -3482,8 +3482,8 @@ fn test_invalid_call_imm() { #[test] #[should_panic(expected = "Invalid syscall should have been detected in the verifier.")] fn test_invalid_exit_or_return() { - for sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { - let inst = if sbpf_version == SBPFVersion::V1 { + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { + let inst = if sbpf_version == SBPFVersion::V0 { 0x9d } else { 0x95 @@ -3569,12 +3569,12 @@ fn test_capped_after_callx() { ); } -// SBPFv1 only [DEPRECATED] +// SBPFv0 only [DEPRECATED] #[test] fn test_err_fixed_stack_out_of_bound() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, max_call_depth: 3, ..Config::default() }; @@ -3597,7 +3597,7 @@ fn test_err_fixed_stack_out_of_bound() { #[test] fn test_execution_overrun() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -3629,7 +3629,7 @@ fn test_execution_overrun() { #[test] fn test_mov32_reg_truncating() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -3647,7 +3647,7 @@ fn test_mov32_reg_truncating() { #[test] fn test_lddw() { let config = Config { - 
enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -3768,7 +3768,7 @@ fn test_lddw() { #[test] fn test_le() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -3826,7 +3826,7 @@ fn test_le() { #[test] fn test_neg() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -3874,7 +3874,7 @@ fn test_neg() { #[test] fn test_callx_imm() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -3898,7 +3898,7 @@ fn test_callx_imm() { #[test] fn test_mul() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -3969,7 +3969,7 @@ fn test_mul() { #[test] fn test_div() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -4054,7 +4054,7 @@ fn test_div() { #[test] fn test_mod() { let config = Config { - enabled_sbpf_versions: SBPFVersion::V1..=SBPFVersion::V1, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, ..Config::default() }; test_interpreter_and_jit_asm!( @@ -4121,7 +4121,7 @@ fn test_mod() { #[test] fn test_symbol_relocation() { - // No relocation is necessary in SBFPv2 + // No relocation is necessary in SBFPv3 test_syscall_asm!( " mov64 r1, r10 diff --git a/tests/exercise_instructions.rs 
b/tests/exercise_instructions.rs index 659c5770..2538b305 100644 --- a/tests/exercise_instructions.rs +++ b/tests/exercise_instructions.rs @@ -508,7 +508,7 @@ fn fuzz_alu() { } } -fn test_ins(v1: bool, ins: String, prng: &mut SmallRng, cu: u64) { +fn test_ins(v0: bool, ins: String, prng: &mut SmallRng, cu: u64) { let mut input = [0u8; 80]; prng.fill_bytes(&mut input); @@ -539,8 +539,8 @@ fn test_ins(v1: bool, ins: String, prng: &mut SmallRng, cu: u64) { ); let mut config = Config::default(); - if v1 { - config.enabled_sbpf_versions = SBPFVersion::V1..=SBPFVersion::V1; + if v0 { + config.enabled_sbpf_versions = SBPFVersion::V0..=SBPFVersion::V0; } test_interpreter_and_jit_asm!(asm.as_str(), config, input, (), TestContextObject::new(cu)); } diff --git a/tests/verifier.rs b/tests/verifier.rs index be7320f9..6234ba23 100644 --- a/tests/verifier.rs +++ b/tests/verifier.rs @@ -127,7 +127,7 @@ fn test_verifier_err_endian_size() { let executable = Executable::::from_text_bytes( prog, Arc::new(BuiltinProgram::new_mock()), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); @@ -146,7 +146,7 @@ fn test_verifier_err_incomplete_lddw() { let executable = Executable::::from_text_bytes( prog, Arc::new(BuiltinProgram::new_mock()), - SBPFVersion::V1, + SBPFVersion::V0, FunctionRegistry::default(), ) .unwrap(); @@ -156,13 +156,13 @@ fn test_verifier_err_incomplete_lddw() { #[test] #[should_panic(expected = "LDDWCannotBeLast")] fn test_verifier_err_lddw_cannot_be_last() { - for highest_sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for highest_sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let prog = &[0x18, 0x00, 0x00, 0x00, 0x88, 0x77, 0x66, 0x55]; let executable = Executable::::from_text_bytes( prog, Arc::new(BuiltinProgram::new_loader( Config { - enabled_sbpf_versions: SBPFVersion::V1..=highest_sbpf_version, + enabled_sbpf_versions: SBPFVersion::V0..=highest_sbpf_version, ..Config::default() }, FunctionRegistry::default(), @@ -179,14 
+179,14 @@ fn test_verifier_err_lddw_cannot_be_last() { fn test_verifier_err_invalid_reg_dst() { // r11 is disabled when sbpf_version.dynamic_stack_frames()=false, and only sub and add are // allowed when sbpf_version.dynamic_stack_frames()=true - for highest_sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for highest_sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let executable = assemble::( " mov r11, 1 exit", Arc::new(BuiltinProgram::new_loader( Config { - enabled_sbpf_versions: SBPFVersion::V1..=highest_sbpf_version, + enabled_sbpf_versions: SBPFVersion::V0..=highest_sbpf_version, ..Config::default() }, FunctionRegistry::default(), @@ -202,14 +202,14 @@ fn test_verifier_err_invalid_reg_dst() { fn test_verifier_err_invalid_reg_src() { // r11 is disabled when sbpf_version.dynamic_stack_frames()=false, and only sub and add are // allowed when sbpf_version.dynamic_stack_frames()=true - for highest_sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for highest_sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let executable = assemble::( " mov r0, r11 exit", Arc::new(BuiltinProgram::new_loader( Config { - enabled_sbpf_versions: SBPFVersion::V1..=highest_sbpf_version, + enabled_sbpf_versions: SBPFVersion::V0..=highest_sbpf_version, ..Config::default() }, FunctionRegistry::default(), @@ -271,7 +271,7 @@ fn test_verifier_err_call_lddw() { #[test] #[should_panic(expected = "InvalidRegister(0)")] fn test_verifier_err_callx_cannot_use_r10() { - for highest_sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for highest_sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let executable = assemble::( " callx r10 @@ -279,7 +279,7 @@ fn test_verifier_err_callx_cannot_use_r10() { ", Arc::new(BuiltinProgram::new_loader( Config { - enabled_sbpf_versions: SBPFVersion::V1..=highest_sbpf_version, + enabled_sbpf_versions: SBPFVersion::V0..=highest_sbpf_version, ..Config::default() }, FunctionRegistry::default(), @@ -340,7 +340,7 @@ fn 
test_verifier_err_unknown_opcode() { let executable = Executable::::from_text_bytes( prog, Arc::new(BuiltinProgram::new_mock()), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); @@ -357,7 +357,7 @@ fn test_verifier_unknown_sycall() { let executable = Executable::::from_text_bytes( prog, Arc::new(BuiltinProgram::new_mock()), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); @@ -378,7 +378,7 @@ fn test_verifier_known_syscall() { let executable = Executable::::from_text_bytes( prog, Arc::new(loader), - SBPFVersion::V2, + SBPFVersion::V3, FunctionRegistry::default(), ) .unwrap(); @@ -446,13 +446,13 @@ fn test_sdiv_disabled() { ]; for (opc, instruction) in instructions { - for highest_sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for highest_sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let assembly = format!("\n{instruction}\nexit"); let executable = assemble::( &assembly, Arc::new(BuiltinProgram::new_loader( Config { - enabled_sbpf_versions: SBPFVersion::V1..=highest_sbpf_version, + enabled_sbpf_versions: SBPFVersion::V0..=highest_sbpf_version, ..Config::default() }, FunctionRegistry::default(), @@ -460,7 +460,7 @@ fn test_sdiv_disabled() { ) .unwrap(); let result = executable.verify::(); - if highest_sbpf_version == SBPFVersion::V2 { + if highest_sbpf_version == SBPFVersion::V3 { assert!(result.is_ok()); } else { assert_error!(result, "VerifierError(UnknownOpCode({}, {}))", opc, 0); @@ -471,7 +471,7 @@ fn test_sdiv_disabled() { #[test] fn return_instr() { - for sbpf_version in [SBPFVersion::V1, SBPFVersion::V2] { + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { let prog = &[ 0xbf, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // mov64 r0, 2 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit (v1), syscall (v2) @@ -486,7 +486,7 @@ fn return_instr() { ) .unwrap(); let result = executable.verify::(); - if sbpf_version == SBPFVersion::V2 { + if sbpf_version == SBPFVersion::V3 { 
assert_error!(result, "VerifierError(InvalidSyscall(0))"); } else { assert_error!(result, "VerifierError(UnknownOpCode(157, 2))"); @@ -501,7 +501,7 @@ fn return_in_v2() { return", Arc::new(BuiltinProgram::new_loader( Config { - enabled_sbpf_versions: SBPFVersion::V2..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V3..=SBPFVersion::V3, ..Config::default() }, FunctionRegistry::default(), @@ -519,7 +519,7 @@ fn function_without_return() { add64 r0, 5", Arc::new(BuiltinProgram::new_loader( Config { - enabled_sbpf_versions: SBPFVersion::V2..=SBPFVersion::V2, + enabled_sbpf_versions: SBPFVersion::V3..=SBPFVersion::V3, ..Config::default() }, FunctionRegistry::default(),