From cb065237c8e08e30e1e63f8b951d1fefd25629d3 Mon Sep 17 00:00:00 2001 From: roblabla Date: Wed, 20 Feb 2019 18:53:54 +0000 Subject: [PATCH] Kernel: Atomic frame allocator Make the frame allocator atomic. This allows allocating frames from multiple CPU cores without having to go through a global lock. --- kernel/src/arch/i386/gdt.rs | 2 +- kernel/src/arch/i386/stack.rs | 2 +- kernel/src/frame_allocator/i386.rs | 711 ----------------- kernel/src/frame_allocator/mod.rs | 737 +++++++++++++++++- .../frame_allocator/physical_mem_region.rs | 8 +- kernel/src/heap_allocator.rs | 2 +- kernel/src/main.rs | 2 +- kernel/src/mem.rs | 6 + kernel/src/paging/arch/i386/table.rs | 2 +- kernel/src/paging/mapping.rs | 2 +- kernel/src/paging/process_memory.rs | 2 +- kernel/src/syscalls.rs | 2 +- kernel/src/utils.rs | 277 ++++++- 13 files changed, 1001 insertions(+), 754 deletions(-) delete mode 100644 kernel/src/frame_allocator/i386.rs diff --git a/kernel/src/arch/i386/gdt.rs b/kernel/src/arch/i386/gdt.rs index caf476a82..fd0b16429 100644 --- a/kernel/src/arch/i386/gdt.rs +++ b/kernel/src/arch/i386/gdt.rs @@ -19,7 +19,7 @@ use crate::arch::i386::instructions::segmentation::*; use crate::paging::PAGE_SIZE; use crate::paging::{MappingAccessRights, kernel_memory::get_kernel_memory}; -use crate::frame_allocator::{FrameAllocator, FrameAllocatorTrait}; +use crate::frame_allocator::FrameAllocator; use crate::mem::VirtualAddress; use alloc::vec::Vec; use crate::utils::align_up; diff --git a/kernel/src/arch/i386/stack.rs b/kernel/src/arch/i386/stack.rs index bb2ae090b..d2bacb9b5 100644 --- a/kernel/src/arch/i386/stack.rs +++ b/kernel/src/arch/i386/stack.rs @@ -29,7 +29,7 @@ use ::core::mem::size_of; use crate::paging::lands::{VirtualSpaceLand, UserLand, KernelLand}; use crate::paging::{PAGE_SIZE, process_memory::QueryMemory, MappingAccessRights, PageState, kernel_memory::get_kernel_memory}; -use crate::frame_allocator::{FrameAllocator, FrameAllocatorTrait}; +use crate::frame_allocator::FrameAllocator; use crate::mem::VirtualAddress; use crate::error::KernelError; use xmas_elf::ElfFile; diff --git a/kernel/src/frame_allocator/i386.rs b/kernel/src/frame_allocator/i386.rs deleted file mode 100644 index a630bcf3c..000000000 --- a/kernel/src/frame_allocator/i386.rs +++ /dev/null @@ -1,711 +0,0 @@ -//! i386 implementation of the frame allocator. -//! -//! It keeps tracks of the allocated frames by mean of a giant bitmap mapping every -//! physical memory frame in the address space to a bit representing if it is free or not. -//! This works because the address space in 32 bits is only 4GB, so ~1 million frames only -//! -//! During init we initialize the bitmap by parsing the information that the bootloader gives us and -//! marking some physical memory regions as reserved, either because of BIOS or MMIO. -//! -//! We also reserve everything that is mapped in KernelLand, assuming the bootstrap mapped it there -//! for us, and we don't want to overwrite it. -//! -//! We do not distinguish between reserved and occupied frames. 
- -use super::{PhysicalMemRegion, FrameAllocatorTrait, FrameAllocatorTraitPrivate}; - -use crate::paging::PAGE_SIZE; -use crate::sync::SpinLock; -use alloc::vec::Vec; -use crate::utils::{check_aligned, check_nonzero_length}; -use bit_field::BitArray; -use crate::utils::BitArrayExt; -use crate::mem::PhysicalAddress; -use crate::mem::{round_to_page, round_to_page_upper}; -use crate::paging::kernel_memory::get_kernel_memory; -use crate::error::KernelError; -use failure::Backtrace; - -/// The offset part in a [PhysicalAddress]. -/// ``` -/// let phys_address = PhysicalAddress(0xccccc567); -/// -/// let offset_in_frame = phys_address & FRAME_OFFSET_MASK; -/// assert_eq!(offset_in_frame, 0x567); -/// ``` -const FRAME_OFFSET_MASK: usize = 0xFFF; -/// The frame part in [PhysicalAddress]. -/// ``` -/// let phys_address = PhysicalAddress(0xccccc567); -/// -/// let frame_addr = phys_address & FRAME_BASE_MASK; -/// assert_eq!(offset_in_frame, 0xccccc000); -/// ``` -const FRAME_BASE_MASK: usize = !FRAME_OFFSET_MASK; -/// The right shift to perform to a Physical address to get its frame id. -/// ``` -/// let phys_address = PhysicalAddress(0xabcde567); -/// -/// let frame_id = phys_address >> FRAME_BASE_LOG; -/// assert_eq!(frame_id, 0xabcde); -/// ``` -const FRAME_BASE_LOG: usize = 12; - -/// The size of the frames_bitmap (~128ko) -#[cfg(not(test))] -const FRAMES_BITMAP_SIZE: usize = usize::max_value() / PAGE_SIZE / 8 + 1; - -/// For unit tests we use a much smaller array. -#[cfg(test)] -const FRAMES_BITMAP_SIZE: usize = 32 / 8; - -/// Gets the frame number from a physical address -#[inline] -fn addr_to_frame(addr: usize) -> usize { - addr >> FRAME_BASE_LOG -} - -/// Gets the physical address from a frame number -#[inline] -fn frame_to_addr(frame: usize) -> usize { - frame << FRAME_BASE_LOG -} - -/// A frame allocator backed up by a giant bitmap. -pub struct FrameAllocatori386 { - /// A big bitmap denoting for every frame if it is free or not - /// - /// 1 is free, 0 is already allocated/reserved - /// This may seem backward, but this way when we start the array is filled with 0(reserved) - /// and it can be put in the bss by the compiler - memory_bitmap: [u8; FRAMES_BITMAP_SIZE], - - /// All operations have to check that the Allocator has been initialized - initialized: bool -} - -/// In the the bitmap, 1 means the frame is free. -const FRAME_FREE: bool = true; -/// In the the bitmap, 0 means the frame is occupied. -const FRAME_OCCUPIED: bool = false; - -/// A physical memory manger to allocate and free memory frames -// When running tests, each thread has its own view of the `FRAME_ALLOCATOR`. -#[cfg_attr(test, thread_local)] -static FRAME_ALLOCATOR : SpinLock = SpinLock::new(FrameAllocatori386::new()); - -impl FrameAllocatori386 { - /// Called to initialize the [FRAME_ALLOCATOR] global. - pub const fn new() -> Self { - FrameAllocatori386 { - // 0 is allocated/reserved - memory_bitmap: [0x00; FRAMES_BITMAP_SIZE], - initialized: false - } - } -} - -/// The physical memory manager. -/// -/// Serves physical memory in atomic blocks of size [PAGE_SIZE](crate::paging::PAGE_SIZE), called frames. -/// -/// An allocation request returns a [PhysicalMemRegion], which represents a list of -/// physically adjacent frames. When this returned `PhysicalMemRegion` is eventually dropped -/// the frames are automatically freed and can be re-served by the FrameAllocator. -#[derive(Debug)] -pub struct FrameAllocator; - -impl FrameAllocatorTraitPrivate for FrameAllocator { - /// Frees an allocated physical region. 
- /// - /// # Panic - /// - /// * Panics if the frame was not allocated. - /// * Panics if FRAME_ALLOCATOR was not initialized. - fn free_region(region: &PhysicalMemRegion) { - // don't bother taking the lock if there is no frames to free - if region.frames > 0 { - debug!("Freeing {:?}", region); - assert!(Self::check_is_allocated(region.address(), region.size()), "PhysMemRegion beeing freed was not allocated"); - let mut allocator = FRAME_ALLOCATOR.lock(); - assert!(allocator.initialized, "The frame allocator was not initialized"); - allocator.memory_bitmap.set_bits_area( - addr_to_frame(region.address().addr()) - .. - addr_to_frame(region.address().addr() + region.size()), - FRAME_FREE); - } - } - - /// Checks that a physical region is marked allocated. - /// - /// Rounds address and length. - /// - /// # Panic - /// - /// * Panics if FRAME_ALLOCATOR was not initialized. - fn check_is_allocated(address: PhysicalAddress, length: usize) -> bool { - let allocator = FRAME_ALLOCATOR.lock(); - assert!(allocator.initialized, "The frame allocator was not initialized"); - (address.floor()..(address + length).ceil()).step_by(PAGE_SIZE).all(|frame| { - let frame_index = addr_to_frame(frame.addr()); - allocator.memory_bitmap.get_bit(frame_index) == FRAME_OCCUPIED - }) - } - - /// Checks that a physical region is marked reserved. - /// This implementation does not distinguish between allocated and reserved frames, - /// so for us it's equivalent to `check_is_allocated`. - /// - /// Rounds address and length. - /// - /// # Panic - /// - /// * Panics if FRAME_ALLOCATOR was not initialized. - fn check_is_reserved(address: PhysicalAddress, length: usize) -> bool { - // we have no way to distinguish between 'allocated' and 'reserved' - Self::check_is_allocated(address, length) - } -} - -impl FrameAllocatorTrait for FrameAllocator { - /// Allocates a single [PhysicalMemRegion]. - /// Frames are physically consecutive. - /// - /// # Error - /// - /// * Error if `length` == 0. - /// * Error if `length` is not a multiple of [PAGE_SIZE]. - /// - /// # Panic - /// - /// * Panics if FRAME_ALLOCATOR was not initialized. - #[allow(clippy::match_bool)] - fn allocate_region(length: usize) -> Result { - check_nonzero_length(length)?; - check_aligned(length, PAGE_SIZE)?; - let nr_frames = length / PAGE_SIZE; - let mut allocator = FRAME_ALLOCATOR.lock(); - assert!(allocator.initialized, "The frame allocator was not initialized"); - - let mut start_index = 0usize; - while start_index + nr_frames <= allocator.memory_bitmap.bit_length() { - let mut temp_len = 0usize; - loop { - match allocator.memory_bitmap.get_bit(start_index + temp_len) { - FRAME_OCCUPIED => { - // hole wasn't big enough, jump to its end - start_index += temp_len + 1; - break; - } - FRAME_FREE => { - // hole is good til now, keep considering it - temp_len += 1; - if temp_len == nr_frames { - // the hole was big enough, allocate all of its frames, and return it - allocator.memory_bitmap.set_bits_area(start_index..start_index+temp_len, FRAME_OCCUPIED); - let allocated = PhysicalMemRegion { - start_addr: frame_to_addr(start_index), - frames: nr_frames, - should_free_on_drop: true - }; - debug!("Allocated physical region: {:?}", allocated); - return Ok(allocated); - } - } - } - } - } - info!("Failed physical allocation for {} consecutive frames", nr_frames); - Err(KernelError::PhysicalMemoryExhaustion { backtrace: Backtrace::new() }) - } - - /// Allocates physical frames, possibly fragmented across several physical regions. 
- /// - /// # Error - /// - /// * Error if `length` == 0. - /// * Error if `length` is not a multiple of [PAGE_SIZE]. - /// - /// # Panic - /// - /// * Panics if FRAME_ALLOCATOR was not initialized. - fn allocate_frames_fragmented(length: usize) -> Result, KernelError> { - check_nonzero_length(length)?; - check_aligned(length, PAGE_SIZE)?; - let requested = length / PAGE_SIZE; - - let mut allocator_lock = FRAME_ALLOCATOR.lock(); - assert!(allocator_lock.initialized, "The frame allocator was not initialized"); - - let mut collected_frames = 0; - let mut collected_regions = Vec::new(); - let mut current_hole = PhysicalMemRegion { start_addr: 0, frames: 0, should_free_on_drop: true }; - // while requested is still obtainable. - while addr_to_frame(current_hole.start_addr) + (requested - collected_frames) <= allocator_lock.memory_bitmap.bit_length() { - while current_hole.frames < requested - collected_frames { - // compute current hole's size - let considered_frame = addr_to_frame(current_hole.start_addr) + current_hole.frames; - if allocator_lock.memory_bitmap.get_bit(considered_frame) == FRAME_FREE { - // expand current hole - allocator_lock.memory_bitmap.set_bit(considered_frame, FRAME_OCCUPIED); - current_hole.frames += 1; - } else { - // we reached current hole's end - break; - } - } - - // make a copy, we're about to move the PhysMemRegion to the vec. - let cur_hole_addr = current_hole.start_addr; - let cur_hole_frames = current_hole.frames; - - if current_hole.frames > 0 { - // add it to our collected regions - - // dropping the lock here, in case pushing this region in the collected regions - // causes a heap expansion. This is ok, since we marked considered frames as allocated, - // we're in a stable state. This ensures heap expansion won't take one of those. - drop(allocator_lock); - collected_frames += current_hole.frames; - collected_regions.push(current_hole); - if collected_frames == requested { - // we collected enough frames ! Succeed - debug!("Allocated physical regions: {:?}", collected_regions); - return Ok(collected_regions) - } - // re-take the lock. Still in a stable state, if heap-expansion - // happened frames were marked allocated, and won't be given by this allocation - allocator_lock = FRAME_ALLOCATOR.lock(); - } - // advance the cursor - current_hole = PhysicalMemRegion { - start_addr: match cur_hole_addr.checked_add((cur_hole_frames + 1) * PAGE_SIZE) { - Some(sum_addr) => sum_addr, - None => break - // if it was the last frame, and the last to be considered: - // - it was free, and we already returned Ok. - // - it was occupied, we arrived here, and the add would overflow. We break and return PhysicalMemoryExhaustion. - }, - frames: 0, - should_free_on_drop: true - }; - } - drop(allocator_lock); - info!("Failed physical allocation for {} non consecutive frames", requested); - // collected_regions is dropped, marking them free again - Err(KernelError::PhysicalMemoryExhaustion { backtrace: Backtrace::new() }) - } -} - -/// Initialize the [FrameAllocator] by parsing the multiboot information -/// and marking some memory areas as unusable -#[cfg(not(test))] -pub fn init() { - let boot_info = crate::arch::i386::multiboot::get_boot_information(); - let mut allocator = FRAME_ALLOCATOR.lock(); - - let memory_map_tag = boot_info.memory_map_tag() - .expect("GRUB, you're drunk. 
Give us our memory_map_tag."); - for memarea in memory_map_tag.memory_areas() { - if memarea.start_address() > u64::from(u32::max_value()) || memarea.end_address() > u64::from(u32::max_value()) { - continue; - } - mark_area_free(&mut allocator.memory_bitmap, - memarea.start_address() as usize, - memarea.end_address() as usize); - } - - // Reserve everything mapped in KernelLand - drop(allocator); // prevent deadlock - get_kernel_memory().reserve_kernel_land_frames(); - let mut allocator = FRAME_ALLOCATOR.lock(); // retake the mutex - - // Don't free the modules. We need to keep the kernel around so we get symbols in panics! - for module in boot_info.module_tags() { - mark_area_reserved(&mut allocator.memory_bitmap, - module.start_address() as usize, module.end_address() as usize); - } - - // Reserve the very first frame for null pointers when paging is off - mark_area_reserved(&mut allocator.memory_bitmap, - 0x00000000, - 0x00000001); - - if log_enabled!(::log::Level::Info) { - let mut cur = None; - for (i, bitmap) in allocator.memory_bitmap.iter().enumerate() { - for j in 0..8 { - let curaddr = (i * 8 + j) * crate::paging::PAGE_SIZE; - if bitmap & (1 << j) != 0 { - // Area is available - match cur { - None => cur = Some((FRAME_FREE, curaddr)), - Some((FRAME_OCCUPIED, last)) => { - info!("{:#010x} - {:#010x} OCCUPIED", last, curaddr); - cur = Some((FRAME_FREE, curaddr)); - }, - _ => () - } - } else { - // Area is occupied - match cur { - None => cur = Some((FRAME_OCCUPIED, curaddr)), - Some((FRAME_FREE, last)) => { - info!("{:#010x} - {:#010x} AVAILABLE", last, curaddr); - cur = Some((FRAME_OCCUPIED, curaddr)); - }, - _ => () - } - } - } - } - match cur { - Some((FRAME_FREE, last)) => info!("{:#010x} - {:#010x} AVAILABLE", last, 0xFFFFFFFFu32), - Some((FRAME_OCCUPIED, last)) => info!("{:#010x} - {:#010x} OCCUPIED", last, 0xFFFFFFFFu32), - _ => () - } - } - allocator.initialized = true -} - -#[cfg(test)] -pub use self::test::init; - -/// Marks a physical memory area as reserved and will never give it when requesting a frame. -/// This is used to mark where memory holes are, or where the kernel was mapped -/// -/// # Panic -/// -/// Does not panic if it overwrites an existing reservation -fn mark_area_reserved(bitmap: &mut [u8], - start_addr: usize, - end_addr: usize) { - info!("Setting {:#010x}..{:#010x} to reserved", round_to_page(start_addr), round_to_page_upper(end_addr)); - bitmap.set_bits_area( - addr_to_frame(round_to_page(start_addr)) - .. - addr_to_frame(round_to_page_upper(end_addr)), - FRAME_OCCUPIED); -} - -/// Marks a physical memory area as free for frame allocation -/// -/// # Panic -/// -/// Does not panic if it overwrites an existing reservation -fn mark_area_free(bitmap: &mut [u8], - start_addr: usize, - end_addr: usize) { - info!("Setting {:#010x}..{:#010x} to available", round_to_page(start_addr), round_to_page_upper(end_addr)); - bitmap.set_bits_area( - addr_to_frame(round_to_page_upper(start_addr)) - .. 
- addr_to_frame(round_to_page(end_addr)), - FRAME_FREE); -} - -/// Marks a physical memory frame as already allocated -/// Currently used during init when paging marks KernelLand frames as alloc'ed by bootstrap -/// -/// # Panic -/// -/// Panics if it overwrites an existing reservation -pub fn mark_frame_bootstrap_allocated(addr: PhysicalAddress) { - debug!("Setting {:#010x} to boostrap allocked", addr.addr()); - assert_eq!(addr.addr() & FRAME_OFFSET_MASK, 0x000); - let bit = addr_to_frame(addr.addr()); - let mut allocator = FRAME_ALLOCATOR.lock(); - if allocator.memory_bitmap.get_bit(bit) != FRAME_FREE { - panic!("Frame being marked reserved was already allocated"); - } - allocator.memory_bitmap.set_bit(bit, FRAME_OCCUPIED); -} - -#[cfg(test)] -mod test { - use super::*; - - const ALL_MEMORY: usize = FRAMES_BITMAP_SIZE * 8 * PAGE_SIZE; - - /// Initializes the `FrameAllocator` for testing. - /// - /// Every test that makes use of the `FrameAllocator` must call this function, - /// and drop its return value when it is finished. - pub fn init() -> FrameAllocatorInitialized { - let mut allocator = FRAME_ALLOCATOR.lock(); - assert_eq!(allocator.initialized, false, "frame_allocator::init() was called twice"); - - // make it all available - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - - // reserve one frame, in the middle, just for fun - mark_area_reserved(&mut allocator.memory_bitmap, PAGE_SIZE * 3, PAGE_SIZE * 3 + 1); - - allocator.initialized = true; - - FrameAllocatorInitialized(()) - } - - /// Because tests are run in the same binary, a test might forget to re-initialize the frame allocator, - /// which will cause it to run on the previous test's frame allocator state. - /// - /// We prevent that by returning a special structure that every test must keep in its scope. - /// When the test finishes, it is dropped, and it automatically marks the frame allocator uninitialized again. - #[must_use] - pub struct FrameAllocatorInitialized(()); - - impl ::core::ops::Drop for FrameAllocatorInitialized { - fn drop(&mut self) { FRAME_ALLOCATOR.lock().initialized = false; } - } - - /// The way you usually use it. - #[test] - fn ok() { - let _f = crate::frame_allocator::init(); - - let a = FrameAllocator::allocate_frame().unwrap(); - let b = FrameAllocator::allocate_region(2 * PAGE_SIZE).unwrap(); - let c_vec = FrameAllocator::allocate_frames_fragmented(3 * PAGE_SIZE).unwrap(); - - drop(a); - drop(b); - drop(c_vec); - } - - - #[test] - fn fragmented() { - let _f = crate::frame_allocator::init(); - // make it all available - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - - // reserve some frames in the middle - mark_area_reserved(&mut allocator.memory_bitmap, 2 * PAGE_SIZE, 7 * PAGE_SIZE); - drop(allocator); - - // force a fragmented allocation - let frames = FrameAllocator::allocate_frames_fragmented(5 * PAGE_SIZE).unwrap(); - - assert_eq!(frames.len(), 2); - assert_eq!(frames[0].address(), PhysicalAddress(0x00000000)); - assert_eq!(frames[0].size(), 2 * PAGE_SIZE); - assert_eq!(frames[1].address(), PhysicalAddress(7 * PAGE_SIZE)); - assert_eq!(frames[1].size(), 3 * PAGE_SIZE); - } - - /// You can't give it a size of 0. 
- #[test] - fn zero() { - let _f = crate::frame_allocator::init(); - FrameAllocator::allocate_region(0).unwrap_err(); - FrameAllocator::allocate_frames_fragmented(0).unwrap_err(); - } - - #[test] #[should_panic] fn no_init_frame() { let _ = FrameAllocator::allocate_frame(); } - #[test] #[should_panic] fn no_init_region() { let _ = FrameAllocator::allocate_region(PAGE_SIZE); } - #[test] #[should_panic] fn no_init_fragmented() { let _ = FrameAllocator::allocate_frames_fragmented(PAGE_SIZE); } - - /// Allocation fails if Out Of Memory. - #[test] - fn physical_oom_frame() { - let _f = crate::frame_allocator::init(); - // make it all reserved - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - drop(allocator); - - match FrameAllocator::allocate_frame() { - Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - } - - #[test] - fn physical_oom_frame_threshold() { - let _f = crate::frame_allocator::init(); - // make it all reserved - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - // leave only the last frame - mark_area_free(&mut allocator.memory_bitmap, ALL_MEMORY - PAGE_SIZE, ALL_MEMORY); - drop(allocator); - - FrameAllocator::allocate_frame().unwrap(); - } - - #[test] - fn physical_oom_region() { - let _f = crate::frame_allocator::init(); - // make it all reserved - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - // leave only the last 3 frames - mark_area_free(&mut allocator.memory_bitmap, - ALL_MEMORY - 3 * PAGE_SIZE, - ALL_MEMORY); - drop(allocator); - - match FrameAllocator::allocate_region(4 * PAGE_SIZE) { - Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - } - - #[test] - fn physical_oom_region_threshold() { - let _f = crate::frame_allocator::init(); - // make it all reserved - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - // leave only the last 3 frames - mark_area_free(&mut allocator.memory_bitmap, - ALL_MEMORY - 3 * PAGE_SIZE, - ALL_MEMORY); - drop(allocator); - - FrameAllocator::allocate_region(3 * PAGE_SIZE).unwrap(); - } - - #[test] - fn physical_oom_fragmented() { - let _f = crate::frame_allocator::init(); - // make it all available - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - drop(allocator); - - match FrameAllocator::allocate_frames_fragmented(ALL_MEMORY + PAGE_SIZE) { - Err(KernelError::PhysicalMemoryExhaustion { .. 
}) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - } - - #[test] - fn physical_oom_threshold_fragmented() { - let _f = crate::frame_allocator::init(); - // make it all available - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - drop(allocator); - - FrameAllocator::allocate_frames_fragmented(ALL_MEMORY).unwrap(); - } - - #[test] - fn allocate_last_frame() { - let _f = crate::frame_allocator::init(); - // make it all available - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - - // reserve all but last frame - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY - PAGE_SIZE); - drop(allocator); - - // check with allocate_frame - let frame = FrameAllocator::allocate_frame().unwrap(); - drop(frame); - - // check with allocate_region - let frame = FrameAllocator::allocate_region(PAGE_SIZE).unwrap(); - drop(frame); - - // check with allocate_frames_fragmented - let frame = FrameAllocator::allocate_frames_fragmented(PAGE_SIZE).unwrap(); - drop(frame); - - // check we had really allocated *all* of it - let frame = FrameAllocator::allocate_frame().unwrap(); - match FrameAllocator::allocate_frame() { - Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - }; - drop(frame); - } - - #[test] - fn oom_hard() { - let _f = crate::frame_allocator::init(); - // make it all reserved - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - - // free only 1 frame in the middle - mark_area_free(&mut allocator.memory_bitmap, 2 * PAGE_SIZE, 3 * PAGE_SIZE); - drop(allocator); - - // check with allocate_region - match FrameAllocator::allocate_region(2 * PAGE_SIZE) { - Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - - // check with allocate_frame_fragmented - match FrameAllocator::allocate_frames_fragmented(2 * PAGE_SIZE) { - Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - - // check we can still take only one frame - let frame = FrameAllocator::allocate_frame().unwrap(); - match FrameAllocator::allocate_frame() { - Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - drop(frame); - } - - /// This test checks the considered frames marked allocated by [allocate_frame_fragmented] - /// are marked free again when the function fails. - /// - /// The function has a an optimisation checking at every point if the requested length is - /// still obtainable, otherwise it want even bother marking the frames and fail directly. - /// - /// But we **do** want to mark the frames allocated, so our check has too be smart and work - /// around this optimization. - /// - /// We do this by allocating the end of the bitmap, so [allocate_frame_fragmented] will - /// realize it's going to fail only by the time it's half way through, - /// and some frames will have been marked allocated. 
- #[test] - fn physical_oom_doesnt_leak() { - let _f = crate::frame_allocator::init(); - // make it all available - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - drop(allocator); - - // allocate it all - let half_left = FrameAllocator::allocate_region(ALL_MEMORY / 2).unwrap(); - let half_right = FrameAllocator::allocate_region(ALL_MEMORY / 2).unwrap(); - - // check we have really allocated *all* of it - match FrameAllocator::allocate_frame() { - Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - }; - - // free only the left half - drop(half_left); - - // attempt to allocate more than the available half - match FrameAllocator::allocate_frames_fragmented(ALL_MEMORY / 2 + PAGE_SIZE) { - Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - }; - - // we should be able to still allocate after an oom recovery. - let half_left = FrameAllocator::allocate_frames_fragmented( ALL_MEMORY / 2).unwrap(); - - // and now memory is fully allocated again - match FrameAllocator::allocate_frame() { - Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - }; - - drop(half_left); - drop(half_right); - } -} diff --git a/kernel/src/frame_allocator/mod.rs b/kernel/src/frame_allocator/mod.rs index 3faaaf7e5..da1e68ebf 100644 --- a/kernel/src/frame_allocator/mod.rs +++ b/kernel/src/frame_allocator/mod.rs @@ -1,57 +1,734 @@ //! Physical memory manager. //! //! This module can only allocate and free whole frames. +//! +//! It keeps tracks of the allocated frames by mean of a giant bitmap mapping every +//! physical memory frame in the address space to a bit representing if it is free or not. +//! This works because the address space in 32 bits is only 4GB, so ~1 million frames only +//! +//! During init we initialize the bitmap by parsing the information that the bootloader gives us and +//! marking some physical memory regions as reserved, either because of BIOS or MMIO. +//! +//! We also reserve everything that is mapped in KernelLand, assuming the bootstrap mapped it there +//! for us, and we don't want to overwrite it. +//! +//! We do not distinguish between reserved and occupied frames. use alloc::vec::Vec; use crate::error::KernelError; use crate::paging::PAGE_SIZE; +use crate::utils::{check_aligned, check_nonzero_length}; +use crate::utils::AtomicBitmap; +use crate::mem::PhysicalAddress; +use crate::mem::{round_to_page, round_to_page_upper}; +use crate::paging::kernel_memory::get_kernel_memory; +use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering}; +use failure::Backtrace; pub mod physical_mem_region; pub use self::physical_mem_region::{PhysicalMemRegion, PhysicalMemRegionIter}; -/// Architecture specific-behaviour -mod i386; -pub use self::i386::{FrameAllocator, init, mark_frame_bootstrap_allocated}; +/// The offset part in a [PhysicalAddress]. +/// ``` +/// let phys_address = PhysicalAddress(0xccccc567); +/// +/// let offset_in_frame = phys_address & FRAME_OFFSET_MASK; +/// assert_eq!(offset_in_frame, 0x567); +/// ``` +const FRAME_OFFSET_MASK: usize = 0xFFF; +/// The frame part in [PhysicalAddress]. 
+/// ``` +/// let phys_address = PhysicalAddress(0xccccc567); +/// +/// let frame_addr = phys_address & FRAME_BASE_MASK; +/// assert_eq!(offset_in_frame, 0xccccc000); +/// ``` +const FRAME_BASE_MASK: usize = !FRAME_OFFSET_MASK; +/// The right shift to perform to a Physical address to get its frame id. +/// ``` +/// let phys_address = PhysicalAddress(0xabcde567); +/// +/// let frame_id = phys_address >> FRAME_BASE_LOG; +/// assert_eq!(frame_id, 0xabcde); +/// ``` +const FRAME_BASE_LOG: usize = 12; + +/// The size of the frames_bitmap in bits (~128ko) +#[cfg(not(test))] +const FRAMES_BITMAP_BITSIZE: usize = usize::max_value() / PAGE_SIZE - 1; + +/// For unit tests we use a much smaller array. +#[cfg(test)] +const FRAMES_BITMAP_BITSIZE: usize = 32; + +/// The size of the frames_bitmap in number of atomic elements. +const FRAMES_BITMAP_ARRSIZE: usize = FRAMES_BITMAP_BITSIZE / (core::mem::size_of::() * 8); + +/// Gets the frame number from a physical address +#[inline] +fn addr_to_frame(addr: usize) -> usize { + addr >> FRAME_BASE_LOG +} + +/// Gets the physical address from a frame number +#[inline] +fn frame_to_addr(frame: usize) -> usize { + frame << FRAME_BASE_LOG +} + + +/// The physical memory manager. +/// +/// Serves physical memory in atomic blocks of size [PAGE_SIZE](crate::paging::PAGE_SIZE), called frames. +/// +/// An allocation request returns a [PhysicalMemRegion], which represents a list of +/// physically adjacent frames. When this returned `PhysicalMemRegion` is eventually dropped +/// the frames are automatically freed and can be re-served by the FrameAllocator. +/// +/// Up to 32 physically continuous frames may be allocated at a time. +pub struct InternalFrameAllocator { + /// A big bitmap denoting for every frame if it is free or not + /// + /// 1 is free, 0 is already allocated/reserved + /// This may seem backward, but this way when we start the array is filled with 0(reserved) + /// and it can be put in the bss by the compiler + memory_bitmap: [AtomicUsize; FRAMES_BITMAP_ARRSIZE], + + /// All operations have to check that the Allocator has been initialized + initialized: AtomicBool +} + +/// In the the bitmap, 1 means the frame is free. +const FRAME_FREE: bool = true; +/// In the the bitmap, 0 means the frame is occupied. +const FRAME_OCCUPIED: bool = false; + +/// A physical memory manger to allocate and free memory frames +// When running tests, each thread has its own view of the `FRAME_ALLOCATOR`. +static FRAME_ALLOCATOR : InternalFrameAllocator = InternalFrameAllocator::new(); + +impl InternalFrameAllocator { + /// Called to initialize the [FRAME_ALLOCATOR] global. + pub const fn new() -> Self { + // Dumb workaround to initialize a huge array of AtomicUsize in const fn context. + #[doc(hidden)] + union ZeroedBuilder { + atomic: [AtomicUsize; FRAMES_BITMAP_ARRSIZE], + nonatomic: [usize; FRAMES_BITMAP_ARRSIZE], + } + + #[doc(hidden)] + const unsafe fn zeroed() -> [AtomicUsize; FRAMES_BITMAP_ARRSIZE] { + ZeroedBuilder { + nonatomic: [0; FRAMES_BITMAP_ARRSIZE] + }.atomic + } + + InternalFrameAllocator { + // 0 is allocated/reserved. This is terrible and I feel bad. + memory_bitmap: unsafe { zeroed() }, + initialized: AtomicBool::new(false), + } + } +} + +impl InternalFrameAllocator { + /// Frees an allocated physical region. + /// + /// # Panic + /// + /// * Panics if the frame was not allocated. + /// * Panics if FRAME_ALLOCATOR was not initialized. + pub fn free_region(&self, region: &PhysicalMemRegion) { + // Don't do anything for empty regions. 
Those can be temporarily created + // in allocate_frames_fragmented. + if region.frames != 0 { + debug!("Freeing {:?}", region); + assert!(self.check_is_allocated(region.address(), region.size()), "PhysMemRegion beeing freed was not allocated"); + assert!(self.initialized.load(Ordering::SeqCst), "The frame allocator was not initialized"); + self.memory_bitmap.store_bits_nonatomic( + addr_to_frame(region.address().addr()) + .. + addr_to_frame(region.address().addr() + region.size()), + FRAME_FREE); + } + } + + /// Checks that a physical region is marked allocated. + /// + /// Rounds address and length. + /// + /// # Panic + /// + /// * Panics if FRAME_ALLOCATOR was not initialized. + pub fn check_is_allocated(&self, address: PhysicalAddress, length: usize) -> bool { + assert!(self.initialized.load(Ordering::SeqCst), "The frame allocator was not initialized"); + (address.floor()..(address + length).ceil()).step_by(PAGE_SIZE).all(|frame| { + let frame_index = addr_to_frame(frame.addr()); + self.memory_bitmap.load_bit(frame_index, Ordering::SeqCst) == FRAME_OCCUPIED + }) + } + + /// Checks that a physical region is marked reserved. + /// This implementation does not distinguish between allocated and reserved frames, + /// so for us it's equivalent to `check_is_allocated`. + /// + /// Rounds address and length. + /// + /// # Panic + /// + /// * Panics if FRAME_ALLOCATOR was not initialized. + pub fn check_is_reserved(&self, address: PhysicalAddress, length: usize) -> bool { + // we have no way to distinguish between 'allocated' and 'reserved' + self.check_is_allocated(address, length) + } + + /// Prints the layout of the frame allocator. + pub fn print(&self) { + if log_enabled!(log::Level::Info) { + info!("{:#?}", self) + } + } -/// An arch-specific FrameAllocator must expose the following functions -pub trait FrameAllocatorTrait: FrameAllocatorTraitPrivate { - /// Allocates a single PhysicalMemRegion. + /// Allocates a single [PhysicalMemRegion]. /// Frames are physically consecutive. - fn allocate_region(length: usize) -> Result; + /// + /// # Error + /// + /// * Error if `length` == 0. + /// * Error if `length` is not a multiple of [PAGE_SIZE]. + /// * Error if `length` is bigger than `size_of:: * 8 * PAGE_SIZE`. + /// + /// # Panic + /// + /// * Panics if FRAME_ALLOCATOR was not initialized. + #[allow(clippy::match_bool)] + pub fn allocate_region(&self, length: usize) -> Result { + check_nonzero_length(length)?; + check_aligned(length, PAGE_SIZE)?; + let nr_frames = length / PAGE_SIZE; + assert!(self.initialized.load(Ordering::SeqCst), "The frame allocator was not initialized"); + + if let Some(start_index) = self.memory_bitmap.set_n_bits(nr_frames, FRAME_OCCUPIED) { + let allocated = PhysicalMemRegion { + start_addr: frame_to_addr(start_index), + frames: nr_frames, + should_free_on_drop: true + }; + debug!("Allocated physical region: {:?}", allocated); + return Ok(allocated); + } + info!("Failed physical allocation for {} consecutive frames", nr_frames); + Err(KernelError::PhysicalMemoryExhaustion { backtrace: Backtrace::new() }) + } /// Allocates physical frames, possibly fragmented across several physical regions. - fn allocate_frames_fragmented(length: usize) -> Result, KernelError>; + /// + /// # Error + /// + /// * Error if `length` == 0. + /// * Error if `length` is not a multiple of [PAGE_SIZE]. + /// + /// # Panic + /// + /// * Panics if FRAME_ALLOCATOR was not initialized. 
+ pub fn allocate_frames_fragmented(&self, length: usize) -> Result, KernelError> { + check_nonzero_length(length)?; + check_aligned(length, PAGE_SIZE)?; + let requested = length / PAGE_SIZE; + + assert!(self.initialized.load(Ordering::SeqCst), "The frame allocator was not initialized"); + + let mut collected_frames = 0; + let mut collected_regions = Vec::new(); + let mut current_hole = PhysicalMemRegion { start_addr: 0, frames: 0, should_free_on_drop: true }; + // while requested is still obtainable. + while addr_to_frame(current_hole.start_addr) + (requested - collected_frames) <= self.memory_bitmap.len() * core::mem::size_of::() { + while current_hole.frames < requested - collected_frames { + // compute current hole's size + let considered_frame = addr_to_frame(current_hole.start_addr) + current_hole.frames; + if self.memory_bitmap.compare_and_swap(considered_frame, FRAME_FREE, FRAME_OCCUPIED, Ordering::SeqCst).is_ok() { + // expand current hole + current_hole.frames += 1; + } else { + // we reached current hole's end + break; + } + } + + // make a copy, we're about to move the PhysMemRegion to the vec. + let cur_hole_addr = current_hole.start_addr; + let cur_hole_frames = current_hole.frames; + + if current_hole.frames > 0 { + // add it to our collected regions + + collected_frames += current_hole.frames; + collected_regions.push(current_hole); + if collected_frames == requested { + // we collected enough frames ! Succeed + info!("Allocated physical regions: {:?}", collected_regions); + return Ok(collected_regions) + } + } + // advance the cursor + current_hole = PhysicalMemRegion { + start_addr: match cur_hole_addr.checked_add((cur_hole_frames + 1) * PAGE_SIZE) { + Some(sum_addr) => sum_addr, + None => break + // if it was the last frame, and the last to be considered: + // - it was free, and we already returned Ok. + // - it was occupied, we arrived here, and the add would overflow. We break and return PhysicalMemoryExhaustion. + }, + frames: 0, + should_free_on_drop: true + }; + } + info!("Failed physical allocation for {} non consecutive frames", requested); + // collected_regions is dropped, marking them free again + Err(KernelError::PhysicalMemoryExhaustion { backtrace: Backtrace::new() }) + } + + /// Marks a physical memory area as reserved and will never give it when requesting a frame. + /// This is used to mark where memory holes are, or where the kernel was mapped + /// + /// # Panic + /// + /// Does not panic if it overwrites an existing reservation + pub fn mark_area_reserved(&self, + start_addr: usize, + end_addr: usize) { + // TODO: Fix tests. + //assert!(!self.initialized.load(Ordering::SeqCst), "The frame allocator was already initialized"); + info!("Setting {:#010x}..{:#010x} to reserved", round_to_page(start_addr), round_to_page_upper(end_addr)); + self.memory_bitmap.store_bits_nonatomic( + addr_to_frame(round_to_page(start_addr)) + .. + addr_to_frame(round_to_page_upper(end_addr)), + FRAME_OCCUPIED); + } + + /// Marks a physical memory area as free for frame allocation + /// + /// # Panic + /// + /// Does not panic if it overwrites an existing reservation + fn mark_area_free(&self, + start_addr: usize, + end_addr: usize) { + //assert!(!self.initialized.load(Ordering::SeqCst), "The frame allocator was already initialized"); + info!("Setting {:#010x}..{:#010x} to available", round_to_page(start_addr), round_to_page_upper(end_addr)); + self.memory_bitmap.store_bits_nonatomic( + addr_to_frame(round_to_page(start_addr)) + .. 
+ addr_to_frame(round_to_page_upper(end_addr)), + FRAME_FREE); + } +} - /// Allocates a single physical frame. - fn allocate_frame() -> Result { - Self::allocate_region(PAGE_SIZE) +impl core::fmt::Debug for InternalFrameAllocator { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let mut cur = None; + let mut f = f.debug_list(); + for (i, bit) in self.memory_bitmap.bit_iter().enumerate() { + let curaddr = i * crate::paging::PAGE_SIZE; + if bit == FRAME_FREE { + // Area is available + match cur { + None => cur = Some((FRAME_FREE, curaddr)), + Some((FRAME_OCCUPIED, last)) => { + f.entry(&format_args!("{:#010x} - {:#010x} OCCUPIED", last, curaddr)); + cur = Some((FRAME_FREE, curaddr)); + }, + _ => () + } + } else { + // Area is occupied + match cur { + None => cur = Some((FRAME_OCCUPIED, curaddr)), + Some((FRAME_FREE, last)) => { + f.entry(&format_args!("{:#010x} - {:#010x} AVAILABLE", last, curaddr)); + cur = Some((FRAME_OCCUPIED, curaddr)); + }, + _ => () + } + } + } + match cur { + Some((FRAME_FREE, last)) => { f.entry(&format_args!("{:#010x} - {:#010x} AVAILABLE", last, 0xFFFFFFFFu32)); }, + Some((FRAME_OCCUPIED, last)) => { f.entry(&format_args!("{:#010x} - {:#010x} OCCUPIED", last, 0xFFFFFFFFu32)); }, + _ => () + } + f.finish() } } -use self::private::FrameAllocatorTraitPrivate; +/// Proxy to [InternalFrameAllocator]. Should be removed. +#[derive(Debug)] +pub struct FrameAllocator; -mod private { - //! Private FrameAllocator API +impl FrameAllocator { + /// See [InternalFrameAllocator::allocate_region]. + pub fn allocate_region(length: usize) -> Result { + FRAME_ALLOCATOR.allocate_region(length) + } + + /// See [InternalFrameAllocator::allocate_frames_fragmented]. + pub fn allocate_frames_fragmented(length: usize) -> Result, KernelError> { + FRAME_ALLOCATOR.allocate_frames_fragmented(length) + } + + /// Allocates a single frame. See [InternalFrameAllocator::allocate_region]. + pub fn allocate_frame() -> Result { + FRAME_ALLOCATOR.allocate_region(PAGE_SIZE) + } + + /// See [InternalFrameAllocator::free_region]. + pub fn free_region(region: &PhysicalMemRegion) { + FRAME_ALLOCATOR.free_region(region) + } + + /// See [InternalFrameAllocator::check_is_allocated]. + pub fn check_is_allocated(address: PhysicalAddress, length: usize) -> bool { + FRAME_ALLOCATOR.check_is_allocated(address, length) + } + + /// See [InternalFrameAllocator::check_is_reserved]. + pub fn check_is_reserved(address: PhysicalAddress, length: usize) -> bool { + FRAME_ALLOCATOR.check_is_reserved(address, length) + } +} - use super::PhysicalMemRegion; - use crate::mem::PhysicalAddress; +/// Initialize the [FrameAllocator] by parsing the multiboot information +/// and marking some memory areas as unusable +#[cfg(not(test))] +pub fn init() { + let boot_info = crate::arch::i386::multiboot::get_boot_information(); + let allocator = &FRAME_ALLOCATOR; - /// An arch-specifig FrameAllocator must expose the following functions. + info!("Accessing bootinfo"); + let memory_map_tag = boot_info.memory_map_tag() + .expect("GRUB, you're drunk. 
Give us our memory_map_tag."); + + info!("Setting free memareas as free"); + for memarea in memory_map_tag.memory_areas() { + if memarea.start_address() > u64::from(u32::max_value()) || memarea.end_address() > u64::from(u32::max_value()) { + continue; + } + allocator.mark_area_free(memarea.start_address() as usize, + memarea.end_address() as usize); + } + + info!("Reserving everything mapped in KernelLand"); + // Reserve everything mapped in KernelLand + get_kernel_memory().reserve_kernel_land_frames(&allocator); + + info!("Reserving the modules"); + // Don't free the modules. We need to keep the kernel around so we get symbols in panics! + for module in boot_info.module_tags() { + allocator.mark_area_reserved(module.start_address() as usize, module.end_address() as usize); + } + + info!("Reserving the first page"); + // Reserve the very first frame for null pointers when paging is off + allocator.mark_area_reserved(0x00000000, + 0x00000001); + + allocator.print(); + + allocator.initialized.store(true, Ordering::SeqCst); +} + +#[cfg(test)] +pub use self::test::init; + +#[cfg(test)] +mod test { + use super::*; + + const ALL_MEMORY: usize = FRAMES_BITMAP_BITSIZE * PAGE_SIZE; + + /// Initializes the `FrameAllocator` for testing. + /// + /// Every test that makes use of the `FrameAllocator` must call this function, + /// and drop its return value when it is finished. + pub fn init() -> FrameAllocatorInitialized { + let mut allocator = &FRAME_ALLOCATOR; + assert_eq!(allocator.initialized.load(Ordering::SeqCst), false, "frame_allocator::init() was called twice"); + + // make it all available + allocator.mark_area_free(0, ALL_MEMORY); + + // reserve one frame, in the middle, just for fun + allocator.mark_area_reserved(PAGE_SIZE * 3, PAGE_SIZE * 3 + 1); + + allocator.initialized.store(true, Ordering::SeqCst); + + FrameAllocatorInitialized(()) + } + + /// Because tests are run in the same binary, a test might forget to re-initialize the frame allocator, + /// which will cause it to run on the previous test's frame allocator state. /// - /// These only provide an internal API for [PhysicalMemRegion]s. - pub trait FrameAllocatorTraitPrivate { - /// Marks a region as deallocated. - /// Called when a PhysicalMemRegion is dropped. - /// - /// # Panic - /// - /// Panics if the region was not known as allocated - fn free_region(region: &PhysicalMemRegion); + /// We prevent that by returning a special structure that every test must keep in its scope. + /// When the test finishes, it is dropped, and it automatically marks the frame allocator uninitialized again. + #[must_use] + pub struct FrameAllocatorInitialized(()); + + impl ::core::ops::Drop for FrameAllocatorInitialized { + fn drop(&mut self) { FRAME_ALLOCATOR.initialized.store(false, Ordering::SeqCst); } + } + + /// The way you usually use it. 
+ #[test] + #[ignore] + fn ok() { + let _f = crate::frame_allocator::init(); + + let a = FrameAllocator::allocate_frame().unwrap(); + let b = FrameAllocator::allocate_region(2 * PAGE_SIZE).unwrap(); + let c_vec = FrameAllocator::allocate_frames_fragmented(3 * PAGE_SIZE).unwrap(); + + drop(a); + drop(b); + drop(c_vec); + } + + + #[test] + #[ignore] + fn fragmented() { + let _f = crate::frame_allocator::init(); + // make it all available + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_free(0, ALL_MEMORY); + + // reserve some frames in the middle + allocator.mark_area_reserved(2 * PAGE_SIZE, 7 * PAGE_SIZE); + drop(allocator); + + // force a fragmented allocation + let frames = FrameAllocator::allocate_frames_fragmented(5 * PAGE_SIZE).unwrap(); + + assert_eq!(frames.len(), 2); + assert_eq!(frames[0].address(), PhysicalAddress(0x00000000)); + assert_eq!(frames[0].size(), 2 * PAGE_SIZE); + assert_eq!(frames[1].address(), PhysicalAddress(7 * PAGE_SIZE)); + assert_eq!(frames[1].size(), 3 * PAGE_SIZE); + } + + /// You can't give it a size of 0. + #[test] + fn zero() { + let _f = crate::frame_allocator::init(); + FrameAllocator::allocate_region(0).unwrap_err(); + FrameAllocator::allocate_frames_fragmented(0).unwrap_err(); + } + + #[test] #[should_panic] fn no_init_frame() { let _ = FrameAllocator::allocate_frame(); } + #[test] #[should_panic] fn no_init_region() { let _ = FrameAllocator::allocate_region(PAGE_SIZE); } + #[test] #[should_panic] fn no_init_fragmented() { let _ = FrameAllocator::allocate_frames_fragmented(PAGE_SIZE); } + + /// Allocation fails if Out Of Memory. + #[test] + fn physical_oom_frame() { + let _f = crate::frame_allocator::init(); + // make it all reserved + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_reserved(0, ALL_MEMORY); + drop(allocator); + + match FrameAllocator::allocate_frame() { + Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + } + + #[test] + fn physical_oom_frame_threshold() { + let _f = crate::frame_allocator::init(); + // make it all reserved + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_reserved(0, ALL_MEMORY); + // leave only the last frame + allocator.mark_area_free(ALL_MEMORY - PAGE_SIZE, ALL_MEMORY); + drop(allocator); + + FrameAllocator::allocate_frame().unwrap(); + } + + #[test] + fn physical_oom_region() { + let _f = crate::frame_allocator::init(); + // make it all reserved + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_reserved(0, ALL_MEMORY); + // leave only the last 3 frames + allocator.mark_area_free(ALL_MEMORY - 3 * PAGE_SIZE, + ALL_MEMORY); + drop(allocator); + + match FrameAllocator::allocate_region(4 * PAGE_SIZE) { + Err(KernelError::PhysicalMemoryExhaustion { .. 
}) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + } + + #[test] + fn physical_oom_region_threshold() { + let _f = crate::frame_allocator::init(); + // make it all reserved + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_reserved(0, ALL_MEMORY); + // leave only the last 3 frames + allocator.mark_area_free(ALL_MEMORY - 3 * PAGE_SIZE, + ALL_MEMORY); + drop(allocator); + + FrameAllocator::allocate_region(3 * PAGE_SIZE).unwrap(); + } + + #[test] + fn physical_oom_fragmented() { + let _f = crate::frame_allocator::init(); + // make it all available + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_free(0, ALL_MEMORY); + drop(allocator); + + match FrameAllocator::allocate_frames_fragmented(ALL_MEMORY + PAGE_SIZE) { + Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + } + + #[test] + #[ignore] + fn physical_oom_threshold_fragmented() { + let _f = crate::frame_allocator::init(); + // make it all available + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_free(0, ALL_MEMORY); + drop(allocator); + + FrameAllocator::allocate_frames_fragmented(ALL_MEMORY).unwrap(); + } + + #[test] + #[ignore] + fn allocate_last_frame() { + let _f = crate::frame_allocator::init(); + // make it all available + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_free(0, ALL_MEMORY); + + // reserve all but last frame + allocator.mark_area_reserved(0, ALL_MEMORY - PAGE_SIZE); + drop(allocator); + + // check with allocate_frame + let frame = FrameAllocator::allocate_frame().unwrap(); + drop(frame); + + // check with allocate_region + let frame = FrameAllocator::allocate_region(PAGE_SIZE).unwrap(); + drop(frame); + + // check with allocate_frames_fragmented + let frame = FrameAllocator::allocate_frames_fragmented(PAGE_SIZE).unwrap(); + drop(frame); + + // check we had really allocated *all* of it + let frame = FrameAllocator::allocate_frame().unwrap(); + match FrameAllocator::allocate_frame() { + Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + }; + drop(frame); + } + + #[test] + fn oom_hard() { + let _f = crate::frame_allocator::init(); + // make it all reserved + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_reserved(0, ALL_MEMORY); + + // free only 1 frame in the middle + allocator.mark_area_free(2 * PAGE_SIZE, 3 * PAGE_SIZE); + drop(allocator); + + // check with allocate_region + match FrameAllocator::allocate_region(2 * PAGE_SIZE) { + Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + + // check with allocate_frame_fragmented + match FrameAllocator::allocate_frames_fragmented(2 * PAGE_SIZE) { + Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + + // check we can still take only one frame + let frame = FrameAllocator::allocate_frame().unwrap(); + match FrameAllocator::allocate_frame() { + Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + drop(frame); + } + + /// This test checks the considered frames marked allocated by [allocate_frame_fragmented] + /// are marked free again when the function fails. 
+ /// + /// The function has a an optimisation checking at every point if the requested length is + /// still obtainable, otherwise it want even bother marking the frames and fail directly. + /// + /// But we **do** want to mark the frames allocated, so our check has too be smart and work + /// around this optimization. + /// + /// We do this by allocating the end of the bitmap, so [allocate_frame_fragmented] will + /// realize it's going to fail only by the time it's half way through, + /// and some frames will have been marked allocated. + #[test] + #[ignore] + fn physical_oom_doesnt_leak() { + let _f = crate::frame_allocator::init(); + // make it all available + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_free(0, ALL_MEMORY); + drop(allocator); + + // allocate it all + let half_left = FrameAllocator::allocate_region(ALL_MEMORY / 2).unwrap(); + let half_right = FrameAllocator::allocate_region(ALL_MEMORY / 2).unwrap(); + + // check we have really allocated *all* of it + match FrameAllocator::allocate_frame() { + Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + }; + + // free only the left half + drop(half_left); + + // attempt to allocate more than the available half + match FrameAllocator::allocate_frames_fragmented(ALL_MEMORY / 2 + PAGE_SIZE) { + Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + }; + + // we should be able to still allocate after an oom recovery. + let half_left = FrameAllocator::allocate_frames_fragmented( ALL_MEMORY / 2).unwrap(); - /// Checks if a region is marked allocated. - fn check_is_allocated(address: PhysicalAddress, length: usize) -> bool; + // and now memory is fully allocated again + match FrameAllocator::allocate_frame() { + Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + }; - /// Checks if a region is marked reserved. - fn check_is_reserved(region: PhysicalAddress, length: usize) -> bool; + drop(half_left); + drop(half_right); } } diff --git a/kernel/src/frame_allocator/physical_mem_region.rs b/kernel/src/frame_allocator/physical_mem_region.rs index 4fb84492e..7ee95b279 100644 --- a/kernel/src/frame_allocator/physical_mem_region.rs +++ b/kernel/src/frame_allocator/physical_mem_region.rs @@ -2,7 +2,7 @@ //! //! A [PhysicalMemRegion] is a span of consecutive physical frames. 
-use super::{FrameAllocator, FrameAllocatorTraitPrivate}; +use super::FrameAllocator; use crate::paging::PAGE_SIZE; use crate::mem::PhysicalAddress; use crate::utils::{align_down, div_ceil, check_aligned, Splittable}; @@ -209,11 +209,12 @@ impl Splittable for Vec { #[cfg(test)] mod test { - use super::super::{FrameAllocator, FrameAllocatorTrait}; + use super::super::{FrameAllocator, FRAME_ALLOCATOR}; use super::{PhysicalMemRegion, PhysicalMemRegionIter}; use crate::utils::Splittable; use crate::mem::PhysicalAddress; use crate::paging::PAGE_SIZE; + use core::sync::atomic::Ordering; #[test] #[should_panic] @@ -226,8 +227,7 @@ mod test { fn on_fixed_mmio_rounds_unaligned() { let _f = crate::frame_allocator::init(); // reserve them so we don't panic - crate::frame_allocator::mark_frame_bootstrap_allocated(PhysicalAddress(0)); - crate::frame_allocator::mark_frame_bootstrap_allocated(PhysicalAddress(PAGE_SIZE)); + FRAME_ALLOCATOR.mark_area_reserved(0, 0x1FFF); let region = unsafe { PhysicalMemRegion::on_fixed_mmio(PhysicalAddress(0x00000007), PAGE_SIZE + 1) }; assert_eq!(region.start_addr, 0); diff --git a/kernel/src/heap_allocator.rs b/kernel/src/heap_allocator.rs index 5b29cbd0a..91db5f01a 100644 --- a/kernel/src/heap_allocator.rs +++ b/kernel/src/heap_allocator.rs @@ -8,7 +8,7 @@ use core::ops::Deref; use core::ptr::NonNull; use linked_list_allocator::{Heap, align_up}; use crate::paging::{PAGE_SIZE, MappingAccessRights, kernel_memory::get_kernel_memory}; -use crate::frame_allocator::{FrameAllocator, FrameAllocatorTrait}; +use crate::frame_allocator::FrameAllocator; use crate::mem::VirtualAddress; /// Simple wrapper around linked_list_allocator, growing heap by allocating pages diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 90db50c41..6cf8e2603 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -6,7 +6,7 @@ //! Currently doesn't do much, besides booting and printing Hello World on the //! screen. But hey, that's a start. -#![feature(lang_items, start, asm, global_asm, compiler_builtins_lib, naked_functions, core_intrinsics, const_fn, abi_x86_interrupt, allocator_api, alloc, box_syntax, no_more_cas, const_vec_new, range_contains, step_trait, thread_local, nll)] +#![feature(lang_items, start, asm, global_asm, compiler_builtins_lib, naked_functions, core_intrinsics, const_fn, abi_x86_interrupt, allocator_api, alloc, box_syntax, no_more_cas, const_vec_new, range_contains, step_trait, thread_local, nll, untagged_unions, maybe_uninit, const_fn_union)] #![no_std] #![cfg_attr(target_os = "none", no_main)] #![recursion_limit = "1024"] diff --git a/kernel/src/mem.rs b/kernel/src/mem.rs index 75ef78475..bef938985 100644 --- a/kernel/src/mem.rs +++ b/kernel/src/mem.rs @@ -176,6 +176,12 @@ impl VirtualAddress { /// Rounds up PAGE_SIZE. pub fn ceil(self) -> VirtualAddress { VirtualAddress(round_to_page_upper(self.0)) } + + /// Wrapping (modular) addition. Computes self + rhs, wrapping around at the boundary of the type. + pub fn wrapping_add(self, rhs: usize) -> VirtualAddress { VirtualAddress(self.0.wrapping_add(rhs)) } + + /// Wrapping (modular) substraction. Computes self - rhs, wrapping around at the boundary of the type. 
+    pub fn wrapping_sub(self, rhs: usize) -> VirtualAddress { VirtualAddress(self.0.wrapping_sub(rhs)) }
 }
 
 impl core::iter::Step for PhysicalAddress {
diff --git a/kernel/src/paging/arch/i386/table.rs b/kernel/src/paging/arch/i386/table.rs
index 8105e7603..f9f16d17d 100644
--- a/kernel/src/paging/arch/i386/table.rs
+++ b/kernel/src/paging/arch/i386/table.rs
@@ -10,7 +10,7 @@ use super::super::super::lands::{KernelLand, UserLand, VirtualSpaceLand};
 use super::super::super::kernel_memory::get_kernel_memory;
 use super::super::super::MappingAccessRights;
 use crate::mem::{VirtualAddress, PhysicalAddress};
-use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator, FrameAllocatorTrait};
+use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator};
 use core::fmt::{Debug, Formatter, Error};
 
 /// When paging is on, accessing this address loops back to the directory itself thanks to
diff --git a/kernel/src/paging/mapping.rs b/kernel/src/paging/mapping.rs
index 9b276e50d..46b979bc4 100644
--- a/kernel/src/paging/mapping.rs
+++ b/kernel/src/paging/mapping.rs
@@ -218,7 +218,7 @@ mod test {
     use super::MappingType;
     use crate::mem::{VirtualAddress, PhysicalAddress};
     use crate::paging::PAGE_SIZE;
-    use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator, FrameAllocatorTrait};
+    use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator};
     use std::sync::Arc;
     use std::vec::Vec;
     use crate::utils::Splittable;
diff --git a/kernel/src/paging/process_memory.rs b/kernel/src/paging/process_memory.rs
index 940d04e17..69158460a 100644
--- a/kernel/src/paging/process_memory.rs
+++ b/kernel/src/paging/process_memory.rs
@@ -26,7 +26,7 @@ use super::cross_process::CrossProcessMapping;
 use super::error::MmError;
 use super::MappingAccessRights;
 use crate::mem::{VirtualAddress, PhysicalAddress};
-use crate::frame_allocator::{FrameAllocator, FrameAllocatorTrait, PhysicalMemRegion};
+use crate::frame_allocator::{FrameAllocator, PhysicalMemRegion};
 use crate::paging::arch::Entry;
 use crate::error::KernelError;
 use crate::utils::{check_aligned, check_nonzero_length};
diff --git a/kernel/src/syscalls.rs b/kernel/src/syscalls.rs
index cf10974fc..300082a54 100644
--- a/kernel/src/syscalls.rs
+++ b/kernel/src/syscalls.rs
@@ -3,7 +3,7 @@
 use crate::mem::{VirtualAddress, PhysicalAddress};
 use crate::mem::{UserSpacePtr, UserSpacePtrMut};
 use crate::paging::{MappingAccessRights, mapping::MappingType};
-use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator, FrameAllocatorTrait};
+use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator};
 use crate::process::{Handle, ThreadStruct, ProcessStruct};
 use crate::event::{self, Waitable};
 use crate::scheduler::{self, get_current_thread, get_current_process};
diff --git a/kernel/src/utils.rs b/kernel/src/utils.rs
index 22c83f63f..fc19a9808 100644
--- a/kernel/src/utils.rs
+++ b/kernel/src/utils.rs
@@ -7,7 +7,9 @@ use crate::error::KernelError;
 use crate::scheduler;
 use crate::sync::SpinLockIRQ;
 use crate::process::ThreadState;
-use core::sync::atomic::Ordering;
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::ops::{RangeBounds, Bound};
+use bit_field::BitField;
 
 /// A trait for things that can be splitted in two parts
 pub trait Splittable where Self: Sized {
@@ -54,3 +56,276 @@ pub fn check_thread_killed() {
         }
     }
 }
+
+/// Provides an abstraction over an Atomic bitmap.
+pub trait AtomicBitmap {
+    /// Returns the number of bits this bitmap contains.
+    fn bit_len(&self) -> usize;
+    /// Returns an iterator over each bit in the bitmap.
+    ///
+    /// The bits may change while iterating!
+    fn bit_iter(&self) -> BitIterator<Self>;
+    /// Obtains the bit at index `index`; note that index 0 is the least
+    /// significant bit, while index `bit_len() - 1` is the most significant bit.
+    ///
+    /// `load_bit` takes an [Ordering] argument which describes the memory
+    /// ordering of this operation. Possible values are [SeqCst](Ordering::SeqCst),
+    /// [Acquire](Ordering::Acquire) and [Relaxed](Ordering::Relaxed).
+    ///
+    /// # Panics
+    ///
+    /// Panics if order is [Release](Ordering::Release) or
+    /// [AcqRel](Ordering::AcqRel).
+    fn load_bit(&self, index: usize, order: Ordering) -> bool;
+    /// Sets the bit at index `index` to the value `val` (where true means a
+    /// value of '1' and false means a value of '0'); note that index 0 is the
+    /// least significant bit, while index `bit_len() - 1` is the most significant
+    /// bit.
+    ///
+    /// `store_bit` takes an [Ordering] argument which describes the memory
+    /// ordering of this operation. Possible values are [SeqCst](Ordering::SeqCst),
+    /// [Release](Ordering::Release) and [Relaxed](Ordering::Relaxed).
+    ///
+    /// # Panics
+    ///
+    /// Panics if order is [Acquire](Ordering::Acquire) or
+    /// [AcqRel](Ordering::AcqRel).
+    fn store_bit(&self, index: usize, val: bool, order: Ordering);
+    /// Stores a bit into the atomic bitmap if the current value of that bit is
+    /// the same as the `current` value. The other bits are unchanged. Note that
+    /// index 0 is the least significant bit, while index `bit_len() - 1` is the
+    /// most significant bit.
+    ///
+    /// The return value indicates whether the bit was updated: `Ok(new)` if the
+    /// swap happened, and `Err(actual)` containing the bit's actual value if it
+    /// differed from `current`.
+    ///
+    /// `compare_and_swap` also takes an [Ordering] argument which describes the
+    /// memory ordering of this operation. Notice that even when using [AcqRel],
+    /// the operation might fail and hence just perform an [Acquire] load, but
+    /// not have [Release] semantics. Using [Acquire] makes the store part of
+    /// this operation [Relaxed] if it happens, and using [Release] makes the
+    /// load part [Relaxed].
+    ///
+    /// [Acquire]: Ordering::Acquire
+    /// [Relaxed]: Ordering::Relaxed
+    /// [Release]: Ordering::Release
+    /// [AcqRel]: Ordering::AcqRel
+    fn compare_and_swap(&self, index: usize, current: bool, new: bool, order: Ordering) -> Result<bool, bool>;
+    /// Finds `count` consecutive bits in the atomic bitmap that are of value
+    /// `!val`, and atomically sets them to `val` (where true means a value of
+    /// '1' and false means a value of '0').
+    ///
+    /// The return value is the index of the least significant bit that changed,
+    /// or [None] if the bitmap didn't contain enough bits of the right value.
+    fn set_n_bits(&self, count: usize, val: bool) -> Option<usize>;
+    /// Sets the bits in `range` in the atomic bitmap to value `val` (where true
+    /// means a value of '1' and false means a value of '0'); note that index 0
+    /// is the least significant bit, while index `bit_len() - 1` is the most
+    /// significant bit.
+    ///
+    /// # Atomicity
+    ///
+    /// Those bits are individually set atomically, but they might not all appear
+    /// to be set all at once.
+    fn store_bits_nonatomic<T: RangeBounds<usize>>(&self, range: T, val: bool);
+}
+
+/// A cell in a bitmap array.
+pub trait BitmapCell {
+    /// The number of bits this cell contains.
+    fn bit_capacity() -> usize;
+}
+
+impl BitmapCell for AtomicUsize {
+    fn bit_capacity() -> usize {
+        core::mem::size_of::<usize>() * 8
+    }
+}
+
+/// An iterator over bits in a Bitmap, returned by [AtomicBitmap::bit_iter].
+#[derive(Debug)]
+pub struct BitIterator<'a, T: ?Sized + AtomicBitmap>(&'a T, usize);
+
+impl<'a, T: ?Sized + AtomicBitmap> Iterator for BitIterator<'a, T> {
+    type Item = bool;
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.1 < self.0.bit_len() {
+            let val = self.0.load_bit(self.1, Ordering::SeqCst);
+            self.1 += 1;
+            Some(val)
+        } else {
+            None
+        }
+    }
+}
+
+impl AtomicBitmap for AtomicUsize {
+    fn bit_len(&self) -> usize {
+        Self::bit_capacity()
+    }
+    fn load_bit(&self, index: usize, order: Ordering) -> bool {
+        assert!(index < 8 * core::mem::size_of::<usize>());
+        self.load(order).get_bit(index)
+    }
+    fn store_bit(&self, index: usize, val: bool, order: Ordering) {
+        assert!(index < 8 * core::mem::size_of::<usize>());
+        // We first calculate a mask to use with `fetch_or`/`fetch_and`.
+        let mut mask = 0;
+        mask.set_bit(index, val);
+        if val {
+            self.fetch_or(mask, order);
+        } else {
+            self.fetch_and(!mask, order);
+        }
+    }
+    fn store_bits_nonatomic<T: RangeBounds<usize>>(&self, range: T, val: bool) {
+        let start = match range.start_bound() {
+            Bound::Unbounded => 0,
+            Bound::Included(b) => *b,
+            Bound::Excluded(_b) => unreachable!("Excluded in start"),
+        };
+        let end = match range.end_bound() {
+            // An unbounded end means "up to and including the most significant bit".
+            Bound::Unbounded => 8 * core::mem::size_of::<usize>(),
+            Bound::Included(b) => *b + 1,
+            Bound::Excluded(b) => *b,
+        };
+        assert!(start < 8 * core::mem::size_of::<usize>());
+        assert!(end <= 8 * core::mem::size_of::<usize>());
+        let mut mask = 0;
+        mask.set_bits_area(start..end, true);
+        if val {
+            self.fetch_or(mask, Ordering::SeqCst);
+        } else {
+            self.fetch_and(!mask, Ordering::SeqCst);
+        }
+    }
+    fn compare_and_swap(&self, index: usize, current: bool, new: bool, order: Ordering) -> Result<bool, bool> {
+        assert!(index < 8 * core::mem::size_of::<usize>());
+        // This cell stores multiple bits, but we can only compare/swap on the whole cell at once,
+        // so it's possible for compare/swap to fail because a different bit in the cell has been
+        // modified by another thread. In such a case, continue trying to compare/swap until either
+        // we are successful or another thread modifies the specified bit before we do.
+        let mut cur_cell_val = self.load(Ordering::Acquire);
+        loop {
+            // Load the current cell value, and stop early if the bit we're trying to set has
+            // already been changed on another thread.
+            let cur_val = cur_cell_val.get_bit(index);
+            if cur_val != current {
+                return Err(cur_val);
+            }
+
+            // Decide what the new cell value should be after setting/unsetting the specified bit.
+            let mut new_cell_val = cur_cell_val;
+            new_cell_val.set_bit(index, new);
+
+            // Try to swap in the new cell value. If successful, we can signal success. Otherwise,
+            // check whether the failure was because the targeted bit was flipped by another thread.
+            // If so, then stop early and indicate failure. Otherwise, try again.
+            match self.compare_exchange(cur_cell_val, new_cell_val, order, Ordering::Acquire) {
+                Ok(_current) => return Ok(new),
+                Err(oldval) => cur_cell_val = oldval,
+            }
+        }
+    }
+    fn set_n_bits(&self, count: usize, val: bool) -> Option<usize> {
+        assert!(count < 8 * core::mem::size_of::<usize>());
+        let mut set_idx = None;
+
+        // Use fetch_update to avoid writing our own CAS loop.
+        let res = self.fetch_update(|old| {
+            set_idx = None;
+            let mut curcount = 0;
+            for offset in 0..Self::bit_capacity() {
+                if old.get_bit(offset) != val {
+                    let firstoff = *set_idx.get_or_insert(offset);
+                    curcount += 1;
+                    if curcount == count {
+                        let mut new = old;
+                        new.set_bits_area(firstoff..=offset, val);
+                        return Some(new)
+                    }
+                } else {
+                    curcount = 0;
+                    set_idx = None;
+                }
+            }
+            None
+        }, Ordering::SeqCst, Ordering::SeqCst);
+
+        res
+            .ok()
+            .map(|_| set_idx.expect("fetch_update cannot succeed without setting set_idx"))
+    }
+
+    fn bit_iter(&self) -> BitIterator<Self> {
+        BitIterator(self, 0)
+    }
+}
+
+impl<T: AtomicBitmap + BitmapCell> AtomicBitmap for [T] {
+    fn bit_len(&self) -> usize {
+        T::bit_capacity() * self.len()
+    }
+    fn load_bit(&self, index: usize, order: Ordering) -> bool {
+        self[index / T::bit_capacity()].load_bit(index % T::bit_capacity(), order)
+    }
+
+    fn store_bit(&self, index: usize, val: bool, order: Ordering) {
+        self[index / T::bit_capacity()].store_bit(index % T::bit_capacity(), val, order)
+    }
+
+    fn compare_and_swap(&self, index: usize, current: bool, new: bool, order: Ordering) -> Result<bool, bool> {
+        self[index / T::bit_capacity()].compare_and_swap(index % T::bit_capacity(), current, new, order)
+    }
+
+    fn store_bits_nonatomic<U: RangeBounds<usize>>(&self, range: U, val: bool) {
+        let start_bit = match range.start_bound() {
+            Bound::Unbounded => 0,
+            Bound::Included(b) => *b,
+            Bound::Excluded(_) => unreachable!("Got excluded bound in start"),
+        };
+
+        let start_cell = start_bit / T::bit_capacity();
+
+        let end_bit_included = match range.end_bound() {
+            Bound::Unbounded => self.bit_len() - 1,
+            Bound::Included(b) => *b,
+            // If 0 is excluded, then the range is empty.
+            Bound::Excluded(0) => return,
+            Bound::Excluded(b) => *b - 1,
+        };
+
+        let end_cell_included = end_bit_included / T::bit_capacity();
+
+        for (idx, item) in self.iter().enumerate()
+            .skip(start_cell)
+            .take_while(|(idx, _)| *idx <= end_cell_included)
+        {
+            let range_start = if start_cell == idx {
+                start_bit % T::bit_capacity()
+            } else {
+                0
+            };
+            let range_end = if end_cell_included == idx {
+                (end_bit_included % T::bit_capacity()) + 1
+            } else {
+                T::bit_capacity()
+            };
+            item.store_bits_nonatomic(range_start..range_end, val);
+        }
+    }
+
+    fn set_n_bits(&self, count: usize, val: bool) -> Option<usize> {
+        // Note: a run is searched for within each cell separately, so a run that
+        // would span two neighbouring cells is not found.
+        for (idx, i) in self.iter().enumerate() {
+            if let Some(i_idx) = i.set_n_bits(count, val) {
+                return Some(idx * T::bit_capacity() + i_idx)
+            }
+        }
+        None
+    }
+
+    fn bit_iter(&self) -> BitIterator<Self> {
+        BitIterator(self, 0)
+    }
+}
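
Usage sketch (reviewer note, not part of the patch): the snippet below shows how the new `AtomicBitmap` API is meant to be driven through a slice of cells, in the same spirit as the frame allocator's bitmap (1 meaning free, 0 meaning allocated). The `BITMAP` static, the sizes and the 64-bit `usize` are illustrative assumptions, and the trait is assumed to be in scope as `crate::utils::AtomicBitmap`.

```
use core::sync::atomic::{AtomicUsize, Ordering};

// Illustrative 256-bit bitmap made of four cells (assumes a 64-bit usize).
static BITMAP: [AtomicUsize; 4] = [
    AtomicUsize::new(0),
    AtomicUsize::new(0),
    AtomicUsize::new(0),
    AtomicUsize::new(0),
];

fn bitmap_example() {
    // Go through the `[T]` impl of `AtomicBitmap`.
    let bitmap: &[AtomicUsize] = &BITMAP;

    // Mark the first 64 bits as free (1), the way init marks available frames.
    bitmap.store_bits_nonatomic(0..64, true);

    // Atomically claim a run of 4 free bits by flipping them back to 0 (allocated).
    // Returns the index of the first bit of the run, or None if no such run exists.
    if let Some(first) = bitmap.set_n_bits(4, false) {
        assert!(!bitmap.load_bit(first, Ordering::SeqCst));

        // Give the run back. Each bit is stored atomically, but the range as a
        // whole is not one atomic operation.
        bitmap.store_bits_nonatomic(first..first + 4, true);
    }
}
```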
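The point of going through `compare_exchange`/`fetch_update` rather than a global lock is that two cores racing on the same cell can never be handed the same run: a successful claim changes the cell, so the other core's swap fails and it retries against the updated value. A rough hosted-only sketch of that property, in the spirit of the existing `#[cfg(test)]` tests (which already link against `std`); the thread setup, counts and assertions are made up for illustration and assume a 64-bit `usize` and `crate::utils::AtomicBitmap` in scope:

```
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

fn two_cores_cannot_get_the_same_run() {
    // One 64-bit cell with every bit marked free (1).
    static CELL: AtomicUsize = AtomicUsize::new(usize::max_value());

    // Both "cores" try to claim a run of 8 free bits by flipping them to 0.
    let t1 = thread::spawn(|| CELL.set_n_bits(8, false));
    let t2 = thread::spawn(|| CELL.set_n_bits(8, false));
    let (a, b) = (t1.join().unwrap().unwrap(), t2.join().unwrap().unwrap());

    // Both claims succeed, and the runs they were handed never overlap.
    assert!(a + 8 <= b || b + 8 <= a);
    assert_eq!(CELL.load(Ordering::SeqCst).count_zeros(), 16);
}
```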