From 4f08189ca18f0906a8dc68e75d37c3d5361d4de7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Wed, 29 Dec 2021 10:59:49 +0100 Subject: [PATCH] Guards all integer_arithmetic in memory_region.rs (#247) --- src/memory_region.rs | 67 ++++++++++++++++++++++++++++---------------- 1 file changed, 43 insertions(+), 24 deletions(-) diff --git a/src/memory_region.rs b/src/memory_region.rs index ae787b0a..74dd4436 100644 --- a/src/memory_region.rs +++ b/src/memory_region.rs @@ -1,4 +1,3 @@ -#![allow(clippy::integer_arithmetic)] //! This module defines memory regions use crate::{ @@ -25,13 +24,12 @@ pub struct MemoryRegion { impl MemoryRegion { /// Creates a new MemoryRegion structure from a slice pub fn new_from_slice(slice: &[u8], vm_addr: u64, vm_gap_size: u64, is_writable: bool) -> Self { - let vm_gap_shift = if vm_gap_size > 0 { - let vm_gap_shift = - std::mem::size_of::<u64>() as u8 * 8 - vm_gap_size.leading_zeros() as u8 - 1; - debug_assert_eq!(vm_gap_size, 1 << vm_gap_shift); - vm_gap_shift - } else { - std::mem::size_of::<u64>() as u8 * 8 - 1 + let mut vm_gap_shift = (std::mem::size_of::<u64>() as u8) + .saturating_mul(8) + .saturating_sub(1); + if vm_gap_size > 0 { + vm_gap_shift = vm_gap_shift.saturating_sub(vm_gap_size.leading_zeros() as u8); + debug_assert_eq!(Some(vm_gap_size), 1_u64.checked_shl(vm_gap_shift as u32)); }; MemoryRegion { host_addr: slice.as_ptr() as u64, @@ -49,13 +47,18 @@ impl MemoryRegion { vm_addr: u64, len: u64, ) -> Result<u64, EbpfError<E>> { - let begin_offset = vm_addr - self.vm_addr; - let is_in_gap = ((begin_offset >> self.vm_gap_shift as u32) & 1) == 1; - let gap_mask = (-1i64 << self.vm_gap_shift) as u64; - let gapped_offset = (begin_offset & gap_mask) >> 1 | (begin_offset & !gap_mask); + let begin_offset = vm_addr.saturating_sub(self.vm_addr); + let is_in_gap = (begin_offset + .checked_shr(self.vm_gap_shift as u32) + .unwrap_or(0) + & 1) + == 1; + let gap_mask = (-1i64).checked_shl(self.vm_gap_shift as u32).unwrap_or(0) as u64; + let 
gapped_offset = + (begin_offset & gap_mask).checked_shr(1).unwrap_or(0) | (begin_offset & !gap_mask); if let Some(end_offset) = gapped_offset.checked_add(len as u64) { if end_offset <= self.len && !is_in_gap { - return Ok(self.host_addr + gapped_offset); + return Ok(self.host_addr.saturating_add(gapped_offset)); } } Err(EbpfError::InvalidVirtualAddress(vm_addr)) @@ -67,9 +70,9 @@ impl fmt::Debug for MemoryRegion { f, "host_addr: {:#x?}-{:#x?}, vm_addr: {:#x?}-{:#x?}, len: {}", self.host_addr, - self.host_addr + self.len, + self.host_addr.saturating_add(self.len), self.vm_addr, - self.vm_addr + self.len, + self.vm_addr.saturating_add(self.len), self.len ) } @@ -109,9 +112,17 @@ impl<'a> MemoryMapping<'a> { ) -> Result<Self, EbpfError<E>> { regions.sort(); for (index, region) in regions.iter().enumerate() { - if region.vm_addr != (index as u64) << ebpf::VIRTUAL_ADDRESS_BITS + if region.vm_addr + != (index as u64) + .checked_shl(ebpf::VIRTUAL_ADDRESS_BITS as u32) + .unwrap_or(0) || (region.len > 0 - && ((region.vm_addr + region.len - 1) >> ebpf::VIRTUAL_ADDRESS_BITS) as usize + && region + .vm_addr + .saturating_add(region.len) + .saturating_sub(1) + .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32) + .unwrap_or(0) as usize != index) { return Err(EbpfError::InvalidMemoryRegion(index)); @@ -130,7 +141,9 @@ impl<'a> MemoryMapping<'a> { vm_addr: u64, len: u64, ) -> Result<u64, EbpfError<E>> { - let index = (vm_addr >> ebpf::VIRTUAL_ADDRESS_BITS) as usize; + let index = vm_addr + .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32) + .unwrap_or(0) as usize; if (1..self.regions.len()).contains(&index) { let region = &self.regions[index]; if access_type == AccessType::Load || region.is_writable { @@ -149,9 +162,11 @@ impl<'a> MemoryMapping<'a> { vm_addr: u64, len: u64, ) -> Result<u64, EbpfError<E>> { - let stack_frame = - (vm_addr as i64 - ebpf::MM_STACK_START as i64) / self.config.stack_frame_size as i64; - if (-1..self.config.max_call_depth as i64 + 1).contains(&stack_frame) { + let stack_frame = (vm_addr as i64) + 
.saturating_sub(ebpf::MM_STACK_START as i64) + .checked_div(self.config.stack_frame_size as i64) + .unwrap_or(0); + if (-1..(self.config.max_call_depth as i64).saturating_add(1)).contains(&stack_frame) { Err(EbpfError::StackAccessViolation( 0, // Filled out later access_type, @@ -160,7 +175,7 @@ impl<'a> MemoryMapping<'a> { stack_frame, )) } else { - let region_name = match vm_addr & !(ebpf::MM_PROGRAM_START - 1) { + let region_name = match vm_addr & (!ebpf::MM_PROGRAM_START.saturating_sub(1)) { ebpf::MM_PROGRAM_START => "program", ebpf::MM_STACK_START => "stack", ebpf::MM_HEAP_START => "heap", @@ -185,8 +200,12 @@ impl<'a> MemoryMapping<'a> { ) -> Result<(), EbpfError<E>> { if index >= self.regions.len() || (new_len > 0 - && ((self.regions[index].vm_addr + new_len - 1) >> ebpf::VIRTUAL_ADDRESS_BITS) - as usize + && self.regions[index] + .vm_addr + .saturating_add(new_len) + .saturating_sub(1) + .checked_shr(ebpf::VIRTUAL_ADDRESS_BITS as u32) + .unwrap_or(0) as usize != index) { return Err(EbpfError::InvalidMemoryRegion(index));