Switch back to Counter based SpinLockIRQ #534

Open
wants to merge 13 commits into
base: master
176 changes: 120 additions & 56 deletions kernel/src/i386/interrupt_service_routines.rs

Large diffs are not rendered by default.

11 changes: 11 additions & 0 deletions kernel/src/i386/process_switch.rs
@@ -347,6 +347,17 @@ fn jump_to_entrypoint(ep: usize, userspace_stack_ptr: usize, arg1: usize, arg2:
const_assert_eq!((GdtIndex::UTlsRegion as u16) << 3 | 0b11, 0x3B);
const_assert_eq!((GdtIndex::UTlsElf as u16) << 3 | 0b11, 0x43);
const_assert_eq!((GdtIndex::UStack as u16) << 3 | 0b11, 0x4B);


unsafe {
// Safety: This is paired with an undropped SpinLockIrq guard (interrupt_manager) in scheduler::internal_schedule.
// (Normally that guard is balanced by an identical one dropped in the same function in the process we
// switch to, but a freshly created process has no such guard, so the counter must be decremented
// manually.)
// Additionally, an iret occurs later in this function, enabling interrupts.
crate::sync::spin_lock_irq::decrement_lock_count();
}

unsafe {
asm!("
mov ax,0x33 // ds, es <- UData, Ring 3
2 changes: 1 addition & 1 deletion kernel/src/i386/structures/idt.rs
@@ -584,7 +584,7 @@ impl<F> IdtEntry<F> {

/// Sets a task gate for the IDT entry and sets the present bit.
///
/// # Unsafety
/// # Safety
///
/// `tss_selector` must point to a valid TSS, which will remain present.
/// The TSS' `eip` should point to the handler function.
6 changes: 6 additions & 0 deletions kernel/src/main.rs
@@ -250,6 +250,12 @@ pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! {

info!("Allocating cpu_locals");
init_cpu_locals(1);
// Now that cpu_locals are present, ensure that SpinLockIRQs
// are aware that interrupts are disabled.
unsafe {
// Safety: paired with enable_interrupts in interrupt_service_routines::init
sync::spin_lock_irq::disable_interrupts();
}

info!("Enabling interrupts");
unsafe { i386::interrupt_service_routines::init(); }
2 changes: 1 addition & 1 deletion kernel/src/paging/hierarchical_table.rs
@@ -648,7 +648,7 @@ pub trait InactiveHierarchyTrait : TableHierarchy {
/// These frames were marked as occupied when initialising the `FrameAllocator`;
/// we're making them available again.
///
/// # Unsafety
/// # Safety
///
/// Having multiple InactiveHierarchy pointing to the same table hierarchy is unsafe.
/// Should not be used for any other purpose, it is only guaranteed to be safe to drop.
1 change: 1 addition & 0 deletions kernel/src/process.rs
@@ -178,6 +178,7 @@ pub struct ThreadStruct {
///
/// This is used when signaling that this thread has exited.
state_event: ThreadStateEvent

}

/// A handle to a userspace-accessible resource.
2 changes: 1 addition & 1 deletion kernel/src/process/thread_local_storage.rs
@@ -165,7 +165,7 @@ impl TLSManager {

/// Mark this TLS as free, so it can be re-used by a future spawned thread.
///
/// # Unsafety
/// # Safety
///
/// The TLS will be reassigned, so it must never be used again after calling this function.
///
10 changes: 8 additions & 2 deletions kernel/src/scheduler.rs
@@ -71,7 +71,7 @@ pub fn get_current_process() -> Arc<ProcessStruct> {
/// The passed function will be executed after setting the CURRENT_THREAD, but before
/// setting it back to the RUNNING state.
///
/// # Unsafety
/// # Safety
///
/// Interrupts must be disabled when calling this function. It will mutably borrow [`CURRENT_THREAD`],
/// so we can't have interrupts on top of that which try to access it while it is borrowed mutably by us,
@@ -257,6 +257,12 @@
// TODO: Ensure the global counter is <= 1

let interrupt_manager = SpinLockIRQ::new(());

// This is complicated: interrupt_lock is needed to disable interrupts while
// performing a process switch; however, its guard is not dropped until after a process switch
// *back* to this process. Usually it is paired with the SpinLockIrq guard dropped in
// 'process_b', but in the case of a brand new process it is paired with a manual decrement
// of the counter before performing an iret to userspace.
let mut interrupt_lock = interrupt_manager.lock();

loop {
@@ -333,7 +339,7 @@ where
/// The passed function should take care to change the protection level, and ensure it cleans up all
/// the registers before calling the EIP, in order to avoid leaking information to userspace.
///
/// # Unsafety:
/// # Safety:
///
/// Interrupts must be off when calling this function. It will set [`CURRENT_THREAD`], and then
/// turn them on; as we are running a new thread, no SpinLockIRQ is held.
5 changes: 4 additions & 1 deletion kernel/src/sync/spin_lock.rs
@@ -148,7 +148,10 @@ impl<T: ?Sized> SpinLock<T> {
/// a guard within Some.
pub fn try_lock(&self) -> Option<SpinLockGuard<T>> {
use core::sync::atomic::Ordering;
if crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.load(Ordering::SeqCst) != 0 {
use crate::cpu_locals::ARE_CPU_LOCALS_INITIALIZED_YET;
use crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT;
use super::INTERRUPT_DISARM;
if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INSIDE_INTERRUPT_COUNT.load(Ordering::SeqCst) != 0 {
panic!("\
You have attempted to lock a spinlock in interrupt context. \
This is most likely a design flaw. \
174 changes: 108 additions & 66 deletions kernel/src/sync/spin_lock_irq.rs
@@ -9,8 +9,68 @@ use spin::{Mutex as SpinLock, MutexGuard as SpinLockGuard};
use core::fmt;
use core::mem::ManuallyDrop;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::Ordering;
use core::sync::atomic::{AtomicU8, Ordering};
use super::INTERRUPT_DISARM;
use crate::cpu_locals::ARE_CPU_LOCALS_INITIALIZED_YET;

/// Interrupt disable counter.
///
/// # Description
///
/// Allows recursively disabling interrupts while keeping a sane behavior.
/// Should only be manipulated through [enable_interrupts],
/// [disable_interrupts], and [decrement_lock_count].
///
/// Used by the SpinLockIRQ to implement recursive irqsave logic.
#[thread_local]
static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0);

/// Decrement the interrupt disable counter, re-enabling interrupts when it reaches zero.
///
/// See the documentation of [INTERRUPT_DISABLE_COUNTER] for more information.
///
/// # Safety
///
/// Should be called in pairs with [disable_interrupts],
/// otherwise the counter will get out of sync and deadlocks will likely occur.
pub unsafe fn enable_interrupts() {
if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 {
unsafe { interrupts::sti() }
}
}

/// Decrement the interrupt disable counter without re-enabling interrupts.
///
/// Used to decrement counter while keeping interrupts disabled before an iret.
/// See the documentation of [INTERRUPT_DISABLE_COUNTER] for more information.
///
/// # Safety
///
/// Should be called in pairs with [disable_interrupts],
/// otherwise the counter will get out of sync and deadlocks will likely occur.
///
/// Additionally, this should only be used when interrupts are about to be enabled anyway,
/// such as by an iret to userspace.
pub unsafe fn decrement_lock_count() {
if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) {
let _ = INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst);
}
}

/// Increment the interrupt disable counter.
///
/// See the documentation of [INTERRUPT_DISABLE_COUNTER] for more information.
///
/// # Safety
///
/// Should be called in pairs with [enable_interrupts] or [decrement_lock_count],
/// otherwise the counter will get out of sync and deadlocks will likely occur.
pub unsafe fn disable_interrupts() {
if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 {
unsafe { interrupts::cli() }
}
}
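
A minimal sketch of the intended counter semantics (hypothetical call sites, illustration only; assumes CPU locals are initialized, INTERRUPT_DISARM is false, and the counter starts at zero):

// Hypothetical nesting example.
unsafe { disable_interrupts(); }   // counter 0 -> 1, executes cli(): interrupts are now off.
unsafe { disable_interrupts(); }   // counter 1 -> 2, cli() is not executed again.
unsafe { enable_interrupts(); }    // counter 2 -> 1, interrupts stay off (counter is non-zero).
unsafe { enable_interrupts(); }    // counter 1 -> 0, executes sti(): interrupts are re-enabled.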


/// Permanently disables the interrupts. Forever.
///
@@ -32,27 +92,12 @@ pub unsafe fn permanently_disable_interrupts() {
/// - `lock` behaves like a `spinlock_irqsave`. It returns a guard.
/// - Dropping the guard behaves like `spinlock_irqrestore`
///
/// This means that locking a spinlock disables interrupts until all spinlock
/// guards have been dropped.
///
/// A note on reordering: reordering lock drops is prohibited and doing so will
/// result in UB.
//
// TODO: Find sane design for SpinLockIRQ safety
// BODY: Currently, SpinLockIRQ API is unsound. If the guards are dropped in
// BODY: the wrong order, it may cause IF to be reset too early.
// BODY:
// BODY: Ideally, we would need a way to prevent the guard variable to be
// BODY: reassigned. AKA: prevent moving. Note that this is different from what
// BODY: the Pin API solves. The Pin API is about locking a variable in one
// BODY: memory location, but its binding may still be moved and dropped.
// BODY: Unfortunately, Rust does not have a way to express that a value cannot
// BODY: be reassigned.
// BODY:
// BODY: Another possibility would be to switch to a callback API. This would
// BODY: solve the problem, but the scheduler would be unable to consume such
// BODY: locks. Maybe we could have an unsafe "scheduler_relock" function that
// BODY: may only be called from the scheduler?
/// This means that locking a spinlock disables interrupts until all spinlock
/// guards have been dropped.
///
/// Note that guards may be dropped in a different order than they were acquired. A
/// counter is used to decide when to disable/enable interrupts; see the
/// [INTERRUPT_DISABLE_COUNTER] documentation for more information.
pub struct SpinLockIRQ<T: ?Sized> {
/// SpinLock we wrap.
internal: SpinLock<T>
@@ -74,44 +119,39 @@ impl<T> SpinLockIRQ<T> {

impl<T: ?Sized> SpinLockIRQ<T> {
/// Disables interrupts and locks the mutex.
pub fn lock(&self) -> SpinLockIRQGuard<T> {
if INTERRUPT_DISARM.load(Ordering::SeqCst) {
let internalguard = self.internal.lock();
SpinLockIRQGuard(ManuallyDrop::new(internalguard), false)
} else {
// Save current interrupt state.
let saved_intpt_flag = interrupts::are_enabled();

// Disable interruptions
unsafe { interrupts::cli(); }

let internalguard = self.internal.lock();
SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag)
pub fn lock(&self) -> SpinLockIRQGuard<'_, T> {
unsafe {
// Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrqGuard.
disable_interrupts();
}

// TODO: Disable preemption.
// TODO: Spin acquire

// lock
let internalguard = self.internal.lock();
SpinLockIRQGuard(ManuallyDrop::new(internalguard))
}

/// Disables interrupts and locks the mutex.
pub fn try_lock(&self) -> Option<SpinLockIRQGuard<T>> {
if INTERRUPT_DISARM.load(Ordering::SeqCst) {
self.internal.try_lock()
.map(|v| SpinLockIRQGuard(ManuallyDrop::new(v), false))
} else {
// Save current interrupt state.
let saved_intpt_flag = interrupts::are_enabled();

// Disable interruptions
unsafe { interrupts::cli(); }

// Lock spinlock
let internalguard = self.internal.try_lock();

if let Some(internalguard) = internalguard {
// if lock is successful, return guard.
Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag))
} else {
// Else, restore interrupt state
if saved_intpt_flag {
unsafe { interrupts::sti(); }
pub fn try_lock(&self) -> Option<SpinLockIRQGuard<'_, T>> {
unsafe {
// Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrq,
// or in case a guard is not created, later in this function.
disable_interrupts();
}

// TODO: Disable preemption.
// TODO: Spin acquire

// lock
match self.internal.try_lock() {
Some(internalguard) => Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard))),
None => {
// We couldn't lock. Restore irqs and return None
unsafe {
// Safety: Paired with disable_interrupts above in the case that a guard is not created.
enable_interrupts();
}
None
}
@@ -126,29 +166,31 @@ impl<T: ?Sized> SpinLockIRQ<T> {

impl<T: fmt::Debug> fmt::Debug for SpinLockIRQ<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(v) = self.try_lock() {
f.debug_struct("SpinLockIRQ")
.field("data", &v)
.finish()
} else {
write!(f, "SpinLockIRQ {{ <locked> }}")
match self.try_lock() {
Some(d) => {
write!(f, "SpinLockIRQ {{ data: ")?;
d.fmt(f)?;
write!(f, "}}")
},
None => write!(f, "SpinLockIRQ {{ <locked> }}")
}
}
}

/// The SpinLockIrq lock guard.
#[derive(Debug)]
pub struct SpinLockIRQGuard<'a, T: ?Sized>(ManuallyDrop<SpinLockGuard<'a, T>>, bool);
pub struct SpinLockIRQGuard<'a, T: ?Sized>(ManuallyDrop<SpinLockGuard<'a, T>>);

impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> {
fn drop(&mut self) {
// TODO: Spin release
// unlock
unsafe { ManuallyDrop::drop(&mut self.0); }

// Restore irq
if self.1 {
unsafe { interrupts::sti(); }
unsafe {
// Safety: paired with disable_interrupts in SpinLockIRQ::{lock, try_lock}, which returns
// this guard to re-enable interrupts when it is dropped.
enable_interrupts();
}

// TODO: Enable preempt
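
As an illustration of the guard semantics (a hypothetical snippet, assuming the counter starts at zero): guards may be dropped in any order, and interrupts are only re-enabled once the last one goes away, because each drop merely decrements INTERRUPT_DISABLE_COUNTER.

// Hypothetical example; `example` and its parameters are illustrative names.
fn example(lock_a: &SpinLockIRQ<u32>, lock_b: &SpinLockIRQ<u32>) {
    let mut guard_a = lock_a.lock(); // counter 0 -> 1, cli(): interrupts disabled.
    let guard_b = lock_b.lock();     // counter 1 -> 2.
    *guard_a += 1;                   // data access while both guards are held.
    drop(guard_a);                   // counter 2 -> 1, interrupts stay disabled.
    drop(guard_b);                   // counter 1 -> 0, sti(): interrupts re-enabled.
}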