Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Resolves #142] Add lifetimes to VAddr & PAddr #150

Draft
wants to merge 1 commit into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
107 changes: 58 additions & 49 deletions runtime/address.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,53 +6,56 @@ use crate::handler;
use core::{
fmt,
mem::{size_of, MaybeUninit},
ptr, slice,
ptr, slice, marker::PhantomData,
};
use kerla_utils::alignment::align_down;

/// Represents a physical memory address.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(transparent)]
pub struct PAddr(usize);
pub struct PAddr<'memory> {
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Use 'a to follow a naming convention in Rust.

Suggested change
pub struct PAddr<'memory> {
pub struct PAddr<'a> {

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@nuta there's no such convention — lifetimes should be named in a meaningful way.

address: usize,
_lifetime: PhantomData<&'memory ()>,
}

impl PAddr {
pub const fn new(addr: usize) -> PAddr {
PAddr(addr)
impl<'memory> PAddr<'memory> {
pub const fn new(address: usize) -> Self {
PAddr { address, _lifetime: PhantomData::default() }
}

#[inline(always)]
pub const fn is_null(self) -> bool {
self.0 == 0
self.address == 0
}

pub const fn as_vaddr(self) -> VAddr {
debug_assert!(self.0 < KERNEL_STRAIGHT_MAP_PADDR_END);
VAddr::new(self.0 + KERNEL_BASE_ADDR)
pub const fn as_vaddr(self) -> VAddr<'memory> { // TODO: Shouldn't it be impl Into for VAddr trait?
debug_assert!(self.address < KERNEL_STRAIGHT_MAP_PADDR_END);
VAddr::new(self.address + KERNEL_BASE_ADDR)
}

pub const fn as_ptr<T>(self) -> *const T {
debug_assert!(self.0 < KERNEL_STRAIGHT_MAP_PADDR_END);
(self.0 + KERNEL_BASE_ADDR) as *const _
debug_assert!(self.address < KERNEL_STRAIGHT_MAP_PADDR_END);
(self.address + KERNEL_BASE_ADDR) as *const _
}

pub const fn as_mut_ptr<T>(self) -> *mut T {
debug_assert!(self.0 < KERNEL_STRAIGHT_MAP_PADDR_END);
(self.0 + KERNEL_BASE_ADDR) as *mut _
debug_assert!(self.address < KERNEL_STRAIGHT_MAP_PADDR_END);
(self.address + KERNEL_BASE_ADDR) as *mut _
}

#[inline(always)]
#[must_use]
pub const fn add(self, offset: usize) -> PAddr {
PAddr(self.0 + offset)
pub const fn add(self, offset: usize) -> Self {
PAddr { address: self.address + offset, _lifetime: PhantomData::default() }
}

#[inline(always)]
pub const fn value(self) -> usize {
self.0
self.address
}
}

impl fmt::Display for PAddr {
impl fmt::Display for PAddr<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:016x}", self.value())
}
Expand All @@ -61,31 +64,35 @@ impl fmt::Display for PAddr {
/// Represents a *kernel* virtual memory address.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(transparent)]
pub struct VAddr(usize);
pub struct VAddr<'memory> {
address: usize,
_lifetime: PhantomData<&'memory ()>,
}

impl VAddr {
pub const fn new(addr: usize) -> VAddr {
debug_assert!(addr >= KERNEL_BASE_ADDR);
VAddr(addr)
impl<'memory> VAddr<'memory> {
pub const fn new(address: usize) -> Self {
debug_assert!(address >= KERNEL_BASE_ADDR);
VAddr { address, _lifetime: PhantomData::default() }
}

pub const fn as_paddr(self) -> PAddr {
debug_assert!(self.0 >= KERNEL_BASE_ADDR);
PAddr::new(self.0 - KERNEL_BASE_ADDR)
pub const fn as_paddr(self) -> PAddr<'memory> {
debug_assert!(self.address >= KERNEL_BASE_ADDR);
//PAddr::new(self.address - KERNEL_BASE_ADDR)
PAddr { address: self.address, _lifetime: self._lifetime }
}

pub const fn is_accessible_from_kernel(addr: usize) -> bool {
(addr) >= KERNEL_BASE_ADDR && (addr) < KERNEL_BASE_ADDR + KERNEL_STRAIGHT_MAP_PADDR_END
}

pub const fn as_ptr<T>(self) -> *const T {
debug_assert!(self.0 >= KERNEL_BASE_ADDR);
self.0 as *const _
debug_assert!(self.address >= KERNEL_BASE_ADDR);
self.address as *const _
}

pub const fn as_mut_ptr<T>(self) -> *mut T {
debug_assert!(self.0 >= KERNEL_BASE_ADDR);
self.0 as *mut _
debug_assert!(self.address >= KERNEL_BASE_ADDR);
self.address as *mut _
}

/// # Safety
Expand All @@ -109,29 +116,29 @@ impl VAddr {

#[inline(always)]
#[must_use]
pub const fn add(self, offset: usize) -> VAddr {
VAddr::new(self.0 + offset)
pub const fn add(self, offset: usize) -> VAddr<'memory> {
VAddr::new(self.address + offset)
}

#[inline(always)]
#[must_use]
pub const fn sub(self, offset: usize) -> VAddr {
VAddr::new(self.0 - offset)
pub const fn sub(self, offset: usize) -> VAddr<'memory> {
VAddr::new(self.address - offset)
}

#[inline(always)]
#[must_use]
pub const fn align_down(self, alignment: usize) -> VAddr {
VAddr::new(align_down(self.0, alignment))
pub const fn align_down(self, alignment: usize) -> VAddr<'memory> {
VAddr::new(align_down(self.address, alignment))
}

#[inline(always)]
pub const fn value(self) -> usize {
self.0
self.address
}
}

impl fmt::Display for VAddr {
impl fmt::Display for VAddr<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:016x}", self.value())
}
Expand All @@ -157,21 +164,23 @@ pub struct NullUserPointerError;

/// Represents a user virtual memory address.
///
/// It is guaranteed that `UserVaddr` contains a valid address, in other words,
/// It is guaranteed that `UserVAddr` contains a valid address, in other words,
/// it does not point to a kernel address.
///
/// Furthermore, like `NonNull<T>`, it is always non-null. Use `Option<UserVAddr>`
/// to represent a nullable user pointer.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(transparent)]
pub struct UserVAddr(usize);
pub struct UserVAddr {
address: usize
}

impl UserVAddr {
pub const fn new(addr: usize) -> Option<UserVAddr> {
if addr == 0 {
pub const fn new(address: usize) -> Option<UserVAddr> {
if address == 0 {
None
} else {
Some(UserVAddr(addr))
Some(UserVAddr { address })
}
}

Expand All @@ -183,32 +192,32 @@ impl UserVAddr {
}

/// # Safety
/// Make sure `addr` doesn't point to the kernel memory address or it can
/// Make sure `address` doesn't point to the kernel memory address or it can
/// lead to a serious vulnerability!
pub const unsafe fn new_unchecked(addr: usize) -> UserVAddr {
UserVAddr(addr)
pub const unsafe fn new_unchecked(address: usize) -> UserVAddr {
UserVAddr { address }
}

#[inline(always)]
pub const fn as_isize(self) -> isize {
// This cast is always safe thanks to the KERNEL_BASE_ADDR check in
// `UserVAddr::new`.
self.0 as isize
self.address as isize
}

#[inline(always)]
pub const fn add(self, offset: usize) -> UserVAddr {
unsafe { UserVAddr::new_unchecked(self.0 + offset) }
unsafe { UserVAddr::new_unchecked(self.address + offset) }
}

#[inline(always)]
pub const fn sub(self, offset: usize) -> UserVAddr {
unsafe { UserVAddr::new_unchecked(self.0 - offset) }
unsafe { UserVAddr::new_unchecked(self.address - offset) }
}

#[inline(always)]
pub const fn value(self) -> usize {
self.0
self.address
}

pub fn access_ok(self, len: usize) -> Result<(), AccessError> {
Expand Down
20 changes: 10 additions & 10 deletions runtime/backtrace.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ global_asm!(

struct Symbol {
name: &'static str,
addr: VAddr,
addr: VAddr<'static>,
}

fn resolve_symbol(vaddr: VAddr) -> Option<Symbol> {
Expand Down Expand Up @@ -96,23 +96,23 @@ pub fn backtrace() {
});
}

pub struct CapturedBacktraceFrame {
pub vaddr: VAddr,
pub struct CapturedBacktraceFrame<'memory> {
pub vaddr: VAddr<'memory>,
pub offset: usize,
pub symbol_name: &'static str,
}

pub struct CapturedBacktrace {
pub trace: Box<ArrayVec<CapturedBacktraceFrame, 8>>,
pub struct CapturedBacktrace<'memory> {
pub trace: Box<ArrayVec<CapturedBacktraceFrame<'memory>, 8>>,
}

impl CapturedBacktrace {
impl CapturedBacktrace<'_> {
/// Returns a saved backtrace.
pub fn capture() -> CapturedBacktrace {
pub fn capture() -> CapturedBacktrace<'memory> {
let mut trace = Box::new(ArrayVec::new());
Backtrace::current_frame().traverse(|_, vaddr| {
Backtrace::current_frame().traverse( |_, vaddr: VAddr<'memory>| {
if let Some(symbol) = resolve_symbol(vaddr) {
let _ = trace.try_push(CapturedBacktraceFrame {
let _ = trace.try_push(CapturedBacktraceFrame::<'memory> {
vaddr,
symbol_name: symbol.name,
offset: vaddr.value() - symbol.addr.value(),
Expand All @@ -123,7 +123,7 @@ impl CapturedBacktrace {
}
}

impl fmt::Debug for CapturedBacktrace {
impl fmt::Debug for CapturedBacktrace<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for (i, frame) in self.trace.iter().enumerate() {
let _ = writeln!(
Expand Down
4 changes: 2 additions & 2 deletions runtime/bootinfo.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,12 @@ use arrayvec::{ArrayString, ArrayVec};
use crate::address::PAddr;

pub struct RamArea {
pub base: PAddr,
pub base: PAddr<'static>,
pub len: usize,
}

pub struct VirtioMmioDevice {
pub mmio_base: PAddr,
pub mmio_base: PAddr<'static>,
pub irq: u8,
}

Expand Down
1 change: 1 addition & 0 deletions runtime/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
#![no_std]
#![feature(asm)]
#![feature(global_asm)]
#![feature(in_band_lifetimes)]

extern crate alloc;

Expand Down
16 changes: 8 additions & 8 deletions runtime/page_allocator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -61,33 +61,33 @@ bitflags! {
#[derive(Debug)]
pub struct PageAllocError;

pub struct OwnedPages {
paddr: PAddr,
pub struct OwnedPages<'memory> {
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ditto

Suggested change
pub struct OwnedPages<'memory> {
pub struct OwnedPages<'a> {

paddr: PAddr<'memory>,
num_pages: usize,
}

impl OwnedPages {
impl OwnedPages<'_> {
fn new(paddr: PAddr, num_pages: usize) -> OwnedPages {
OwnedPages { paddr, num_pages }
}
}

impl Deref for OwnedPages {
type Target = PAddr;
impl Deref for OwnedPages<'memory> {
type Target = PAddr<'memory>;

fn deref(&self) -> &Self::Target {
&self.paddr
}
}

impl Drop for OwnedPages {
impl Drop for OwnedPages<'_> {
fn drop(&mut self) {
free_pages(self.paddr, self.num_pages);
}
}

// TODO: Use alloc_page
pub fn alloc_pages(num_pages: usize, flags: AllocPageFlags) -> Result<PAddr, PageAllocError> {
pub fn alloc_pages(num_pages: usize, flags: AllocPageFlags) -> Result<PAddr<'static>, PageAllocError> {
let order = num_pages_to_order(num_pages);
let mut zones = ZONES.lock();
for zone in zones.iter_mut() {
Expand All @@ -111,7 +111,7 @@ pub fn alloc_pages(num_pages: usize, flags: AllocPageFlags) -> Result<PAddr, Pag
pub fn alloc_pages_owned(
num_pages: usize,
flags: AllocPageFlags,
) -> Result<OwnedPages, PageAllocError> {
) -> Result<OwnedPages<'static>, PageAllocError> {
let order = num_pages_to_order(num_pages);
let mut zones = ZONES.lock();
for zone in zones.iter_mut() {
Expand Down
18 changes: 9 additions & 9 deletions runtime/spinlock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,14 +12,14 @@ use crate::global_allocator::is_kernel_heap_enabled;
#[cfg(debug_assertions)]
use atomic_refcell::AtomicRefCell;

pub struct SpinLock<T: ?Sized> {
pub struct SpinLock<'memory, T: ?Sized> {
#[cfg(debug_assertions)]
locked_by: AtomicRefCell<Option<CapturedBacktrace>>,
locked_by: AtomicRefCell<Option<CapturedBacktrace<'memory>>>,
inner: spin::mutex::SpinMutex<T>,
}

impl<T> SpinLock<T> {
pub const fn new(value: T) -> SpinLock<T> {
impl<T> SpinLock<'_, T> {
pub const fn new(value: T) -> SpinLock<'memory, T> {
SpinLock {
inner: spin::mutex::SpinMutex::new(value),
#[cfg(debug_assertions)]
Expand All @@ -28,8 +28,8 @@ impl<T> SpinLock<T> {
}
}

impl<T: ?Sized> SpinLock<T> {
pub fn lock(&self) -> SpinLockGuard<'_, T> {
impl<T: ?Sized> SpinLock<'memory, T> {
pub fn lock(&'memory self) -> SpinLockGuard<'memory, T> {
if self.inner.is_locked() {
// Since we don't yet support multiprocessors and interrupts are
// disabled until all locks are released, `lock()` will never fail
Expand Down Expand Up @@ -81,13 +81,13 @@ impl<T: ?Sized> SpinLock<T> {
}
}

unsafe impl<T: ?Sized + Send> Sync for SpinLock<T> {}
unsafe impl<T: ?Sized + Send> Send for SpinLock<T> {}
unsafe impl<T: ?Sized + Send> Sync for SpinLock<'_, T> {}
unsafe impl<T: ?Sized + Send> Send for SpinLock<'_, T> {}

pub struct SpinLockGuard<'a, T: ?Sized> {
inner: ManuallyDrop<spin::mutex::SpinMutexGuard<'a, T>>,
#[cfg(debug_assertions)]
locked_by: &'a AtomicRefCell<Option<CapturedBacktrace>>,
locked_by: &'a AtomicRefCell<Option<CapturedBacktrace<'a>>>,
saved_intr_status: ManuallyDrop<SavedInterruptStatus>,
}

Expand Down
Loading