From 410a627313124252ab1abbd3a3b686c03301bb2a Mon Sep 17 00:00:00 2001
From: Alexander Meißner
Date: Sat, 7 Dec 2024 01:06:12 +0100
Subject: [PATCH] Fix - PQR 64 unsigned sign extension (#642)

* Formats parameters and arguments of emit_product_quotient_remainder().
* Removes sign extension of the immediate values in unsigned PQR instructions.
---
 src/interpreter.rs |  6 ++---
 src/jit.rs         | 57 ++++++++++++++++++++++++++++++++++++++--------
 tests/execution.rs | 34 +++++++++++++++++----------
 3 files changed, 72 insertions(+), 25 deletions(-)

diff --git a/src/interpreter.rs b/src/interpreter.rs
index 573a7cb7..dcb38f5b 100644
--- a/src/interpreter.rs
+++ b/src/interpreter.rs
@@ -403,7 +403,7 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
             ebpf::LMUL32_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32).wrapping_mul(self.reg[src] as u32) as u64,
             ebpf::LMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64),
             ebpf::LMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]),
-            ebpf::UHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(insn.imm as u64 as u128).wrapping_shr(64) as u64,
+            ebpf::UHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(insn.imm as u32 as u128).wrapping_shr(64) as u64,
             ebpf::UHMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(self.reg[src] as u128).wrapping_shr(64) as u64,
             ebpf::SHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i64 as i128).wrapping_mul(insn.imm as i128).wrapping_shr(64) as u64,
             ebpf::SHMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i64 as i128).wrapping_mul(self.reg[src] as i64 as i128).wrapping_shr(64) as u64,
@@ -415,7 +415,7 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
                 self.reg[dst] = (self.reg[dst] as u32 / self.reg[src] as u32) as u64;
             },
             ebpf::UDIV64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
-                self.reg[dst] /= insn.imm as u64;
+                self.reg[dst] /= insn.imm as u32 as u64;
             }
             ebpf::UDIV64_REG if self.executable.get_sbpf_version().enable_pqr() => {
                 throw_error!(DivideByZero; self, self.reg[src], u64);
@@ -429,7 +429,7 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
                 self.reg[dst] = (self.reg[dst] as u32 % self.reg[src] as u32) as u64;
             },
             ebpf::UREM64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
-                self.reg[dst] %= insn.imm as u64;
+                self.reg[dst] %= insn.imm as u32 as u64;
             }
             ebpf::UREM64_REG if self.executable.get_sbpf_version().enable_pqr() => {
                 throw_error!(DivideByZero; self, self.reg[src], u64);
diff --git a/src/jit.rs b/src/jit.rs
index 825ec68a..7d0e4570 100644
--- a/src/jit.rs
+++ b/src/jit.rs
@@ -509,12 +509,24 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
                 }
             },
             ebpf::MUL32_IMM | ebpf::DIV32_IMM | ebpf::MOD32_IMM if !self.executable.get_sbpf_version().enable_pqr() =>
-                self.emit_product_quotient_remainder(OperandSize::S32, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, dst, dst, Some(insn.imm)),
+                self.emit_product_quotient_remainder(
+                    OperandSize::S32,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL,
+                    dst, dst, Some(insn.imm),
+                ),
             ebpf::LD_1B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                 self.emit_address_translation(Some(dst), Value::RegisterPlusConstant64(src, insn.off as i64, true), 1, None);
             },
             ebpf::MUL32_REG | ebpf::DIV32_REG | ebpf::MOD32_REG if !self.executable.get_sbpf_version().enable_pqr() =>
-                self.emit_product_quotient_remainder(OperandSize::S32, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, src, dst, None),
+                self.emit_product_quotient_remainder(
+                    OperandSize::S32,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL,
+                    src, dst, None,
+                ),
             ebpf::LD_2B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                 self.emit_address_translation(Some(dst), Value::RegisterPlusConstant64(src, insn.off as i64, true), 2, None);
             },
@@ -594,7 +606,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
             }
             ebpf::SUB64_REG  => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, src, dst, 0, None)),
             ebpf::MUL64_IMM | ebpf::DIV64_IMM | ebpf::MOD64_IMM if !self.executable.get_sbpf_version().enable_pqr() =>
-                self.emit_product_quotient_remainder(OperandSize::S64, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, dst, dst, Some(insn.imm)),
+                self.emit_product_quotient_remainder(
+                    OperandSize::S64,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL,
+                    dst, dst, Some(insn.imm),
+                ),
             ebpf::ST_1B_IMM if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                 self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 1, Some(Value::Constant64(insn.imm, true)));
             },
@@ -602,7 +620,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
                 self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 2, Some(Value::Constant64(insn.imm, true)));
             },
             ebpf::MUL64_REG | ebpf::DIV64_REG | ebpf::MOD64_REG if !self.executable.get_sbpf_version().enable_pqr() =>
-                self.emit_product_quotient_remainder(OperandSize::S64, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, src, dst, None),
+                self.emit_product_quotient_remainder(
+                    OperandSize::S64,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL,
+                    (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL,
+                    src, dst, None,
+                ),
             ebpf::ST_1B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => {
                 self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 1, Some(Value::Register(src)));
             },
@@ -651,26 +675,30 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
             ebpf::UDIV32_IMM | ebpf::UDIV64_IMM | ebpf::UREM32_IMM | ebpf::UREM64_IMM |
             ebpf::SDIV32_IMM | ebpf::SDIV64_IMM | ebpf::SREM32_IMM | ebpf::SREM64_IMM
             if self.executable.get_sbpf_version().enable_pqr() => {
+                let signed = insn.opc & (1 << 7) != 0;
+                let mut imm = insn.imm;
+                if !signed {
+                    imm &= u32::MAX as i64;
+                }
                 self.emit_product_quotient_remainder(
                     if insn.opc & (1 << 4) != 0 { OperandSize::S64 } else { OperandSize::S32 },
                     insn.opc & (1 << 5) != 0,
                     insn.opc & (1 << 6) != 0,
-                    insn.opc & (1 << 7) != 0,
-                    dst, dst, Some(insn.imm),
+                    signed,
+                    dst, dst, Some(imm),
                 )
             }
             ebpf::LMUL32_REG | ebpf::LMUL64_REG | ebpf::UHMUL64_REG | ebpf::SHMUL64_REG |
             ebpf::UDIV32_REG | ebpf::UDIV64_REG | ebpf::UREM32_REG | ebpf::UREM64_REG |
             ebpf::SDIV32_REG | ebpf::SDIV64_REG | ebpf::SREM32_REG | ebpf::SREM64_REG
-            if self.executable.get_sbpf_version().enable_pqr() => {
+            if self.executable.get_sbpf_version().enable_pqr() =>
                 self.emit_product_quotient_remainder(
                     if insn.opc & (1 << 4) != 0 { OperandSize::S64 } else { OperandSize::S32 },
                     insn.opc & (1 << 5) != 0,
                     insn.opc & (1 << 6) != 0,
                     insn.opc & (1 << 7) != 0,
                     src, dst, None,
-                )
-            }
+                ),
 
             // BPF_JMP class
             ebpf::JA => {
@@ -1272,7 +1300,16 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
     }
 
     #[allow(clippy::too_many_arguments)]
-    fn emit_product_quotient_remainder(&mut self, size: OperandSize, alt_dst: bool, division: bool, signed: bool, src: u8, dst: u8, imm: Option<i64>) {
+    fn emit_product_quotient_remainder(
+        &mut self,
+        size: OperandSize,
+        alt_dst: bool,
+        division: bool,
+        signed: bool,
+        src: u8,
+        dst: u8,
+        imm: Option<i64>,
+    ) {
         //         LMUL UHMUL SHMUL UDIV SDIV UREM SREM
         // ALU     F7/4 F7/4  F7/5  F7/6 F7/7 F7/6 F7/7
         // src-in  REGISTER_SCRATCH REGISTER_SCRATCH REGISTER_SCRATCH REGISTER_SCRATCH REGISTER_SCRATCH REGISTER_SCRATCH REGISTER_SCRATCH
diff --git a/tests/execution.rs b/tests/execution.rs
index 9573c918..0989119e 100644
--- a/tests/execution.rs
+++ b/tests/execution.rs
@@ -701,9 +701,9 @@ fn test_be64() {
 #[test]
 fn test_pqr() {
     let mut prog = [0; 48];
-    prog[0] = ebpf::MOV64_IMM;
+    prog[0] = ebpf::MOV32_IMM;
     prog[8] = ebpf::HOR64_IMM;
-    prog[16] = ebpf::MOV64_IMM;
+    prog[16] = ebpf::MOV32_IMM;
     prog[17] = 1; // dst = R1
     prog[24] = ebpf::HOR64_IMM;
     prog[25] = 1; // dst = R1
@@ -716,21 +716,31 @@ fn test_pqr() {
         (ebpf::UDIV64_IMM, 13u64, 4u64, 3u64),
         (ebpf::UREM32_IMM, 13u64, 4u64, 1u64),
         (ebpf::UREM64_IMM, 13u64, 4u64, 1u64),
-        (ebpf::UHMUL64_IMM, 13u64, u64::MAX, 12u64),
-        (ebpf::UDIV32_IMM, 13u64, u64::MAX, 0u64),
-        (ebpf::UDIV64_IMM, 13u64, u64::MAX, 0u64),
-        (ebpf::UREM32_IMM, 13u64, u64::MAX, 13u64),
-        (ebpf::UREM64_IMM, 13u64, u64::MAX, 13u64),
+        (ebpf::UHMUL64_IMM, 13u64, u32::MAX as u64, 0u64),
+        (ebpf::UDIV32_IMM, 13u64, u32::MAX as u64, 0u64),
+        (ebpf::UDIV64_IMM, 13u64, u32::MAX as u64, 0u64),
+        (ebpf::UREM32_IMM, 13u64, u32::MAX as u64, 13u64),
+        (ebpf::UREM64_IMM, 13u64, u32::MAX as u64, 13u64),
         (ebpf::UHMUL64_IMM, u64::MAX, 4u64, 3u64),
         (ebpf::UDIV32_IMM, u64::MAX, 4u64, (u32::MAX / 4) as u64),
         (ebpf::UDIV64_IMM, u64::MAX, 4u64, u64::MAX / 4),
         (ebpf::UREM32_IMM, u64::MAX, 4u64, 3u64),
         (ebpf::UREM64_IMM, u64::MAX, 4u64, 3u64),
-        (ebpf::UHMUL64_IMM, u64::MAX, u64::MAX, u64::MAX - 1),
-        (ebpf::UDIV32_IMM, u64::MAX, u64::MAX, 1u64),
-        (ebpf::UDIV64_IMM, u64::MAX, u64::MAX, 1u64),
-        (ebpf::UREM32_IMM, u64::MAX, u64::MAX, 0u64),
-        (ebpf::UREM64_IMM, u64::MAX, u64::MAX, 0u64),
+        (
+            ebpf::UHMUL64_IMM,
+            u64::MAX,
+            u32::MAX as u64,
+            u32::MAX as u64 - 1,
+        ),
+        (ebpf::UDIV32_IMM, u64::MAX, u32::MAX as u64, 1u64),
+        (
+            ebpf::UDIV64_IMM,
+            u64::MAX,
+            u32::MAX as u64,
+            u32::MAX as u64 + 2,
+        ),
+        (ebpf::UREM32_IMM, u64::MAX, u32::MAX as u64, 0u64),
+        (ebpf::UREM64_IMM, u64::MAX, u32::MAX as u64, 0u64),
         (
             ebpf::LMUL32_IMM,
             13i64 as u64,
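The hunks above all hinge on one detail: insn.imm is an i64 holding a sign-extended 32-bit immediate, so casting it straight to u64 drags the sign bits along, while the unsigned PQR instructions should see a zero-extended 32-bit value. The following standalone Rust sketch is not part of the patch; it only illustrates that difference, assuming an immediate of -1 (0xFFFFFFFF), and checks one of the updated test expectations.

fn main() {
    // A 32-bit immediate of -1 arrives sign-extended in the i64 `imm` field.
    let imm: i64 = -1;

    // Old behavior: `as u64` keeps the sign extension, so unsigned PQR ops
    // effectively used 0xFFFF_FFFF_FFFF_FFFF instead of the 32-bit immediate.
    assert_eq!(imm as u64, u64::MAX);

    // New behavior: `as u32 as u64` zero-extends to 0x0000_0000_FFFF_FFFF.
    assert_eq!(imm as u32 as u64, u32::MAX as u64);

    // Matches the updated UDIV64_IMM expectation: since
    // 2^64 - 1 = (2^32 - 1) * (2^32 + 1), dividing u64::MAX by u32::MAX
    // yields u32::MAX as u64 + 2.
    assert_eq!(u64::MAX / (imm as u32 as u64), u32::MAX as u64 + 2);
}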