From 7dcc96318dc23268ed926b1750db7bbd44075980 Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Sat, 26 Oct 2024 13:27:29 +0200
Subject: [PATCH 1/2] copy more PML4 entries

If there is more than 512 GiB of memory, we need to copy more than the
first PML4 entry to access it. Similarly, if the frame buffer is not in
the address range covered by the first PML4 entry, we need to copy more
entries in order to access it.
---
 uefi/src/main.rs | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/uefi/src/main.rs b/uefi/src/main.rs
index 93dfb6c7..4eaef2b5 100644
--- a/uefi/src/main.rs
+++ b/uefi/src/main.rs
@@ -144,7 +144,8 @@ fn main_inner(image: Handle, mut st: SystemTable<Boot>) -> Status {
     let mut frame_allocator =
         LegacyFrameAllocator::new(memory_map.entries().copied().map(UefiMemoryDescriptor));
 
-    let page_tables = create_page_tables(&mut frame_allocator);
+    let max_phys_addr = frame_allocator.max_phys_addr();
+    let page_tables = create_page_tables(&mut frame_allocator, max_phys_addr, framebuffer.as_ref());
     let mut ramdisk_len = 0u64;
     let ramdisk_addr = if let Some(rd) = ramdisk {
         ramdisk_len = rd.len() as u64;
@@ -385,6 +386,8 @@ fn load_file_from_tftp_boot_server(
 /// Creates page table abstraction types for both the bootloader and kernel page tables.
 fn create_page_tables(
     frame_allocator: &mut impl FrameAllocator<Size4KiB>,
+    max_phys_addr: PhysAddr,
+    frame_buffer: Option<&RawFrameBufferInfo>,
 ) -> bootloader_x86_64_common::PageTables {
     // UEFI identity-maps all memory, so the offset between physical and virtual addresses is 0
     let phys_offset = VirtAddr::new(0);
@@ -410,9 +413,21 @@ fn create_page_tables(
         }
     };
 
-    // copy the first entry (we don't need to access more than 512 GiB; also, some UEFI
-    // implementations seem to create an level 4 table entry 0 in all slots)
-    new_table[0] = old_table[0].clone();
+    // copy the pml4 entries for all identity mapped memory.
+    let end_addr = VirtAddr::new(max_phys_addr.as_u64() - 1);
+    for p4 in 0..=usize::from(end_addr.p4_index()) {
+        new_table[p4] = old_table[p4].clone();
+    }
+
+    // copy the pml4 entry for the frame buffer (the frame buffer is not
+    // necessarily part of the identity mapping).
+    if let Some(frame_buffer) = frame_buffer {
+        let start_addr = VirtAddr::new(frame_buffer.addr.as_u64());
+        let end_addr = start_addr + frame_buffer.info.byte_len;
+        for p4 in usize::from(start_addr.p4_index())..=usize::from(end_addr.p4_index()) {
+            new_table[p4] = old_table[p4].clone();
+        }
+    }
 
     // the first level 4 table entry is now identical, so we can just load the new one
     unsafe {

From 97268147ecf5bb247d93da4be4681399e9f30a2f Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Sat, 26 Oct 2024 13:29:01 +0200
Subject: [PATCH 2/2] mark more entries as used

The bootloader maps some of the memory used by the kernel into its own
address space as well. In order for that to work, we must ensure that
the bootloader doesn't already have memory mapped there. Mark regions
that are likely to be used by the bootloader as unusable.
---
 common/src/level_4_entries.rs | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/common/src/level_4_entries.rs b/common/src/level_4_entries.rs
index 6e6ad9d3..c894d0b5 100644
--- a/common/src/level_4_entries.rs
+++ b/common/src/level_4_entries.rs
@@ -39,7 +39,17 @@ impl UsedLevel4Entries {
             rng: config.mappings.aslr.then(entropy::build_rng),
         };
 
-        used.entry_state[0] = true; // TODO: Can we do this dynamically?
+        // The bootloader maps some of the kernel's memory into its own page tables.
+        // We need to prevent overlaps, so mark all memory that could already
+        // be used by the bootloader as inaccessible.
+
+        // All memory in this range is identity mapped.
+        used.mark_range_as_used(0, max_phys_addr.as_u64());
+
+        // The bootloader needs to access the frame buffer.
+        if let Some(frame_buffer) = framebuffer {
+            used.mark_range_as_used(frame_buffer.addr.as_u64(), frame_buffer.info.byte_len);
+        }
 
         // Mark the statically configured ranges from the config as used.
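
Background note (illustrative only, not part of the patches above): each level-4
(PML4) entry covers 2^39 bytes = 512 GiB, and an address selects its entry through
bits 39..=47. Both patches rely on this to decide how many entries to copy and to
mark as used. The sketch below shows the same index math with hypothetical helpers
`p4_index` and `p4_entries`; the real code uses `VirtAddr::p4_index` from the
x86_64 crate, and the sizes and addresses in `main` are made up for the example.

    // Level-4 (PML4) index of an address: bits 39..=47 select one of 512 entries,
    // so each entry spans 2^39 bytes = 512 GiB.
    fn p4_index(addr: u64) -> usize {
        ((addr >> 39) & 0x1ff) as usize
    }

    // Inclusive range of PML4 entries needed to reach every byte of `start..start + len`.
    fn p4_entries(start: u64, len: u64) -> core::ops::RangeInclusive<usize> {
        let last_byte = start + len - 1;
        p4_index(start)..=p4_index(last_byte)
    }

    fn main() {
        const GIB: u64 = 1 << 30;

        // 600 GiB of identity-mapped physical memory spans entries 0 and 1,
        // so copying only new_table[0] would leave part of it unreachable.
        assert_eq!(p4_entries(0, 600 * GIB), 0..=1);

        // A frame buffer placed at 1 TiB (above the first 512 GiB) needs entry 2,
        // which is why its PML4 entries are copied and marked as used separately.
        assert_eq!(p4_entries(1024 * GIB, 16 << 20), 2..=2);
    }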