diff --git a/runtime/address.rs b/runtime/address.rs index 03f6541f..82deb2ef 100644 --- a/runtime/address.rs +++ b/runtime/address.rs @@ -6,53 +6,56 @@ use crate::handler; use core::{ fmt, mem::{size_of, MaybeUninit}, - ptr, slice, + ptr, slice, marker::PhantomData, }; use kerla_utils::alignment::align_down; /// Represents a physical memory address. #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] #[repr(transparent)] -pub struct PAddr(usize); +pub struct PAddr<'memory> { + address: usize, + _lifetime: PhantomData<&'memory ()>, +} -impl PAddr { - pub const fn new(addr: usize) -> PAddr { - PAddr(addr) +impl<'memory> PAddr<'memory> { + pub const fn new(address: usize) -> Self { + PAddr { address, _lifetime: PhantomData } } #[inline(always)] pub const fn is_null(self) -> bool { - self.0 == 0 + self.address == 0 } - pub const fn as_vaddr(self) -> VAddr { - debug_assert!(self.0 < KERNEL_STRAIGHT_MAP_PADDR_END); - VAddr::new(self.0 + KERNEL_BASE_ADDR) + pub const fn as_vaddr(self) -> VAddr<'memory> { // TODO: Shouldn't it be impl Into for VAddr trait? 
+ debug_assert!(self.address < KERNEL_STRAIGHT_MAP_PADDR_END); + VAddr::new(self.address + KERNEL_BASE_ADDR) } pub const fn as_ptr(self) -> *const T { - debug_assert!(self.0 < KERNEL_STRAIGHT_MAP_PADDR_END); - (self.0 + KERNEL_BASE_ADDR) as *const _ + debug_assert!(self.address < KERNEL_STRAIGHT_MAP_PADDR_END); + (self.address + KERNEL_BASE_ADDR) as *const _ } pub const fn as_mut_ptr(self) -> *mut T { - debug_assert!(self.0 < KERNEL_STRAIGHT_MAP_PADDR_END); - (self.0 + KERNEL_BASE_ADDR) as *mut _ + debug_assert!(self.address < KERNEL_STRAIGHT_MAP_PADDR_END); + (self.address + KERNEL_BASE_ADDR) as *mut _ } #[inline(always)] #[must_use] - pub const fn add(self, offset: usize) -> PAddr { - PAddr(self.0 + offset) + pub const fn add(self, offset: usize) -> Self { + PAddr { address: self.address + offset, _lifetime: PhantomData } } #[inline(always)] pub const fn value(self) -> usize { - self.0 + self.address } } -impl fmt::Display for PAddr { +impl fmt::Display for PAddr<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:016x}", self.value()) } @@ -61,17 +64,21 @@ impl fmt::Display for PAddr { /// Represents a *kernel* virtual memory address. 
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] #[repr(transparent)] -pub struct VAddr(usize); +pub struct VAddr<'memory> { + address: usize, + _lifetime: PhantomData<&'memory ()>, +} -impl VAddr { - pub const fn new(addr: usize) -> VAddr { - debug_assert!(addr >= KERNEL_BASE_ADDR); - VAddr(addr) +impl<'memory> VAddr<'memory> { + pub const fn new(address: usize) -> Self { + debug_assert!(address >= KERNEL_BASE_ADDR); + VAddr { address, _lifetime: PhantomData } } - pub const fn as_paddr(self) -> PAddr { - debug_assert!(self.0 >= KERNEL_BASE_ADDR); - PAddr::new(self.0 - KERNEL_BASE_ADDR) + pub const fn as_paddr(self) -> PAddr<'memory> { + debug_assert!(self.address >= KERNEL_BASE_ADDR); + // Undo the kernel straight mapping to recover the physical address. + PAddr { address: self.address - KERNEL_BASE_ADDR, _lifetime: self._lifetime } } pub const fn is_accessible_from_kernel(addr: usize) -> bool { @@ -79,13 +86,13 @@ impl VAddr { } pub const fn as_ptr(self) -> *const T { - debug_assert!(self.0 >= KERNEL_BASE_ADDR); - self.0 as *const _ + debug_assert!(self.address >= KERNEL_BASE_ADDR); + self.address as *const _ } pub const fn as_mut_ptr(self) -> *mut T { - debug_assert!(self.0 >= KERNEL_BASE_ADDR); - self.0 as *mut _ + debug_assert!(self.address >= KERNEL_BASE_ADDR); + self.address as *mut _ } /// # Safety @@ -109,29 +116,29 @@ impl VAddr { #[inline(always)] #[must_use] - pub const fn add(self, offset: usize) -> VAddr { - VAddr::new(self.0 + offset) + pub const fn add(self, offset: usize) -> VAddr<'memory> { + VAddr::new(self.address + offset) } #[inline(always)] #[must_use] - pub const fn sub(self, offset: usize) -> VAddr { - VAddr::new(self.0 - offset) + pub const fn sub(self, offset: usize) -> VAddr<'memory> { + VAddr::new(self.address - offset) } #[inline(always)] #[must_use] - pub const fn align_down(self, alignment: usize) -> VAddr { - VAddr::new(align_down(self.0, alignment)) + pub const fn align_down(self, alignment: usize) -> VAddr<'memory> { + 
VAddr::new(align_down(self.address, alignment)) } #[inline(always)] pub const fn value(self) -> usize { - self.0 + self.address } } -impl fmt::Display for VAddr { +impl fmt::Display for VAddr<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:016x}", self.value()) } @@ -157,21 +164,23 @@ pub struct NullUserPointerError; /// Represents a user virtual memory address. /// -/// It is guaranteed that `UserVaddr` contains a valid address, in other words, +/// It is guaranteed that `UserVAddr` contains a valid address, in other words, /// it does not point to a kernel address. /// /// Futhermore, like `NonNull`, it is always non-null. Use `Option` /// represent a nullable user pointer. #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] #[repr(transparent)] -pub struct UserVAddr(usize); +pub struct UserVAddr { + address: usize +} impl UserVAddr { - pub const fn new(addr: usize) -> Option { - if addr == 0 { + pub const fn new(address: usize) -> Option { + if address == 0 { None } else { - Some(UserVAddr(addr)) + Some(UserVAddr { address }) } } @@ -183,32 +192,32 @@ impl UserVAddr { } /// # Safety - /// Make sure `addr` doesn't point to the kernel memory address or it can + /// Make sure `address` doesn't point to the kernel memory address or it can /// lead to a serious vulnerability! - pub const unsafe fn new_unchecked(addr: usize) -> UserVAddr { - UserVAddr(addr) + pub const unsafe fn new_unchecked(address: usize) -> UserVAddr { + UserVAddr { address } } #[inline(always)] pub const fn as_isize(self) -> isize { // This cast is always safe thanks to the KERNEL_BASE_ADDR check in // `UserVAddr::new`. 
- self.0 as isize + self.address as isize } #[inline(always)] pub const fn add(self, offset: usize) -> UserVAddr { - unsafe { UserVAddr::new_unchecked(self.0 + offset) } + unsafe { UserVAddr::new_unchecked(self.address + offset) } } #[inline(always)] pub const fn sub(self, offset: usize) -> UserVAddr { - unsafe { UserVAddr::new_unchecked(self.0 - offset) } + unsafe { UserVAddr::new_unchecked(self.address - offset) } } #[inline(always)] pub const fn value(self) -> usize { - self.0 + self.address } pub fn access_ok(self, len: usize) -> Result<(), AccessError> { diff --git a/runtime/backtrace.rs b/runtime/backtrace.rs index 2f29a368..9716f30c 100644 --- a/runtime/backtrace.rs +++ b/runtime/backtrace.rs @@ -37,7 +37,7 @@ global_asm!( struct Symbol { name: &'static str, - addr: VAddr, + addr: VAddr<'static>, } fn resolve_symbol(vaddr: VAddr) -> Option { @@ -96,23 +96,23 @@ pub fn backtrace() { }); } -pub struct CapturedBacktraceFrame { - pub vaddr: VAddr, +pub struct CapturedBacktraceFrame<'memory> { + pub vaddr: VAddr<'memory>, pub offset: usize, pub symbol_name: &'static str, } -pub struct CapturedBacktrace { - pub trace: Box>, +pub struct CapturedBacktrace<'memory> { + pub trace: Box, 8>>, } -impl CapturedBacktrace { +impl CapturedBacktrace<'_> { /// Returns a saved backtrace. 
- pub fn capture() -> CapturedBacktrace { + pub fn capture() -> CapturedBacktrace<'memory> { let mut trace = Box::new(ArrayVec::new()); - Backtrace::current_frame().traverse(|_, vaddr| { + Backtrace::current_frame().traverse( |_, vaddr: VAddr<'memory>| { if let Some(symbol) = resolve_symbol(vaddr) { - let _ = trace.try_push(CapturedBacktraceFrame { + let _ = trace.try_push(CapturedBacktraceFrame::<'memory> { vaddr, symbol_name: symbol.name, offset: vaddr.value() - symbol.addr.value(), @@ -123,7 +123,7 @@ impl CapturedBacktrace { } } -impl fmt::Debug for CapturedBacktrace { +impl fmt::Debug for CapturedBacktrace<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for (i, frame) in self.trace.iter().enumerate() { let _ = writeln!( diff --git a/runtime/bootinfo.rs b/runtime/bootinfo.rs index c99fd155..d981e83d 100644 --- a/runtime/bootinfo.rs +++ b/runtime/bootinfo.rs @@ -3,12 +3,12 @@ use arrayvec::{ArrayString, ArrayVec}; use crate::address::PAddr; pub struct RamArea { - pub base: PAddr, + pub base: PAddr<'static>, pub len: usize, } pub struct VirtioMmioDevice { - pub mmio_base: PAddr, + pub mmio_base: PAddr<'static>, pub irq: u8, } diff --git a/runtime/lib.rs b/runtime/lib.rs index e3db0e59..1721c389 100644 --- a/runtime/lib.rs +++ b/runtime/lib.rs @@ -3,6 +3,7 @@ #![no_std] #![feature(asm)] #![feature(global_asm)] +#![feature(in_band_lifetimes)] extern crate alloc; diff --git a/runtime/page_allocator.rs b/runtime/page_allocator.rs index 870efbd3..0bdab639 100644 --- a/runtime/page_allocator.rs +++ b/runtime/page_allocator.rs @@ -61,33 +61,33 @@ bitflags! 
{ #[derive(Debug)] pub struct PageAllocError; -pub struct OwnedPages { - paddr: PAddr, +pub struct OwnedPages<'memory> { + paddr: PAddr<'memory>, num_pages: usize, } -impl OwnedPages { +impl OwnedPages<'_> { fn new(paddr: PAddr, num_pages: usize) -> OwnedPages { OwnedPages { paddr, num_pages } } } -impl Deref for OwnedPages { - type Target = PAddr; +impl Deref for OwnedPages<'memory> { + type Target = PAddr<'memory>; fn deref(&self) -> &Self::Target { &self.paddr } } -impl Drop for OwnedPages { +impl Drop for OwnedPages<'_> { fn drop(&mut self) { free_pages(self.paddr, self.num_pages); } } // TODO: Use alloc_page -pub fn alloc_pages(num_pages: usize, flags: AllocPageFlags) -> Result { +pub fn alloc_pages(num_pages: usize, flags: AllocPageFlags) -> Result, PageAllocError> { let order = num_pages_to_order(num_pages); let mut zones = ZONES.lock(); for zone in zones.iter_mut() { @@ -111,7 +111,7 @@ pub fn alloc_pages(num_pages: usize, flags: AllocPageFlags) -> Result Result { +) -> Result, PageAllocError> { let order = num_pages_to_order(num_pages); let mut zones = ZONES.lock(); for zone in zones.iter_mut() { diff --git a/runtime/spinlock.rs b/runtime/spinlock.rs index d2d68d61..ac2847a7 100644 --- a/runtime/spinlock.rs +++ b/runtime/spinlock.rs @@ -12,14 +12,14 @@ use crate::global_allocator::is_kernel_heap_enabled; #[cfg(debug_assertions)] use atomic_refcell::AtomicRefCell; -pub struct SpinLock { +pub struct SpinLock<'memory, T: ?Sized> { #[cfg(debug_assertions)] - locked_by: AtomicRefCell>, + locked_by: AtomicRefCell>>, inner: spin::mutex::SpinMutex, } -impl SpinLock { - pub const fn new(value: T) -> SpinLock { +impl SpinLock<'_, T> { + pub const fn new(value: T) -> SpinLock<'memory, T> { SpinLock { inner: spin::mutex::SpinMutex::new(value), #[cfg(debug_assertions)] @@ -28,8 +28,8 @@ impl SpinLock { } } -impl SpinLock { - pub fn lock(&self) -> SpinLockGuard<'_, T> { +impl SpinLock<'memory, T> { + pub fn lock(&'memory self) -> SpinLockGuard<'memory, T> { if 
self.inner.is_locked() { // Since we don't yet support multiprocessors and interrupts are // disabled until all locks are released, `lock()` will never fail @@ -81,13 +81,13 @@ impl SpinLock { } } -unsafe impl Sync for SpinLock {} -unsafe impl Send for SpinLock {} +unsafe impl Sync for SpinLock<'_, T> {} +unsafe impl Send for SpinLock<'_, T> {} pub struct SpinLockGuard<'a, T: ?Sized> { inner: ManuallyDrop>, #[cfg(debug_assertions)] - locked_by: &'a AtomicRefCell>, + locked_by: &'a AtomicRefCell>>, saved_intr_status: ManuallyDrop, } diff --git a/runtime/x64/apic.rs b/runtime/x64/apic.rs index 0c489518..1a98b626 100644 --- a/runtime/x64/apic.rs +++ b/runtime/x64/apic.rs @@ -16,11 +16,11 @@ enum LocalApicReg { SpuriousInterrupt = 0xf0, } -struct LocalApic { - base: PAddr, +struct LocalApic<'memory> { + base: PAddr<'memory>, } -impl LocalApic { +impl LocalApic<'_> { pub const fn new(base: PAddr) -> LocalApic { LocalApic { base } } diff --git a/runtime/x64/ioapic.rs b/runtime/x64/ioapic.rs index 2cd815ea..3b9ab617 100644 --- a/runtime/x64/ioapic.rs +++ b/runtime/x64/ioapic.rs @@ -14,11 +14,11 @@ enum IoApicReg { RedirectTableBase = 0x10, } -struct IoApic { - base: PAddr, +struct IoApic<'memory> { + base: PAddr<'memory>, } -impl IoApic { +impl IoApic<'_> { pub const fn new(base: PAddr) -> IoApic { IoApic { base } } diff --git a/runtime/x64/paging.rs b/runtime/x64/paging.rs index 1ac0ccaa..1ba9fbc3 100644 --- a/runtime/x64/paging.rs +++ b/runtime/x64/paging.rs @@ -29,7 +29,7 @@ bitflags! 
{ } } -fn entry_paddr(entry: PageTableEntry) -> PAddr { +fn entry_paddr(entry: PageTableEntry) -> PAddr<'static> { PAddr::new((entry & 0x7ffffffffffff000) as usize) } @@ -127,7 +127,7 @@ fn duplicate_table(original_table_paddr: PAddr, level: usize) -> Result Result { +fn allocate_pml4() -> Result, PageAllocError> { extern "C" { static __kernel_pml4: u8; } @@ -154,7 +154,7 @@ fn allocate_pml4() -> Result { } pub struct PageTable { - pml4: PAddr, + pml4: PAddr<'static>, } impl PageTable { diff --git a/runtime/x64/vga.rs b/runtime/x64/vga.rs index 81f08485..43095536 100644 --- a/runtime/x64/vga.rs +++ b/runtime/x64/vga.rs @@ -35,7 +35,7 @@ const BANNER: &str = " Kerla /dev/console is connected to the serial port (no keyboard support) "; struct Console { - base: VAddr, + base: VAddr<'static>, x: usize, y: usize, fg: VgaColor,