Commit

Add safety comments and fix clippy.

kitlith committed Oct 31, 2019
1 parent 495cb50 commit c62dc63
Showing 5 changed files with 63 additions and 25 deletions.
12 changes: 10 additions & 2 deletions kernel/src/i386/interrupt_service_routines.rs
@@ -28,7 +28,6 @@
//! [syscall_interrupt_dispatcher]: self::interrupt_service_routines::syscall_interrupt_dispatcher

use crate::i386::structures::idt::{PageFaultErrorCode, Idt};
use crate::i386::instructions::interrupts::sti;
use crate::mem::VirtualAddress;
use crate::paging::kernel_memory::get_kernel_memory;
use crate::i386::PrivilegeLevel;
@@ -65,6 +64,10 @@ pub fn check_thread_killed() {
if scheduler::get_current_thread().state.load(Ordering::SeqCst) == ThreadState::TerminationPending {
let lock = SpinLockIRQ::new(());
loop { // in case of spurious wakeups
// TODO: Is it valid for interrupts to be active here?
// BODY: The interrupt wrappers call decrement_lock_count before calling this function,
// BODY: because it may never return.
// BODY: Upon a spurious wakeup, interrupts will end up being enabled.
let _ = scheduler::unschedule(&lock, lock.lock());
}
}
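To make the ordering behind this TODO concrete, here is a minimal runnable model of the sequence the BODY text describes; the stubs are stand-ins of my own, not the kernel's primitives:

// User-space model, not kernel code.
fn disable_interrupts() { println!("cli; counter 0 -> 1"); }
fn decrement_lock_count() { println!("counter 1 -> 0, no sti"); }
fn check_thread_killed() {
    // On termination this loops on unschedule(); a spurious wakeup re-locks
    // and drops the SpinLockIRQ, which runs enable_interrupts() and can
    // leave interrupts live here. That is the situation the TODO asks about.
    println!("possibly unschedule(); may never return");
}

fn main() {
    disable_interrupts();   // interrupt wrapper entry
    // ... handler body ...
    decrement_lock_count(); // wrapper exit: counter drops, interrupts stay masked
    check_thread_killed();  // called after the decrement, hence the question
    // the asm wrapper performs the final iret here
}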
@@ -572,7 +575,9 @@ macro_rules! generate_trap_gate_handler {

if $interrupt_context {
let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst);
disable_interrupts();
// Lets SpinLockIRQ know that we are in an interrupt context; interrupts should not be re-enabled.
// Safety: Paired with decrement_lock_count, which is called before exiting the interrupt.
unsafe { disable_interrupts(); }
}

if let PrivilegeLevel::Ring0 = SegmentSelector(userspace_context.cs as u16).rpl() {
@@ -594,6 +599,8 @@

if $interrupt_context {
let _ = INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst);
// Safety: Paired with disable_interrupts, which was called earlier in this wrapper function.
// Additionally, this is called shortly before an iret occurs, inside the asm wrapper.
unsafe { decrement_lock_count(); }
}
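Pulled out of the macro, the bookkeeping this hunk generates is symmetric: every interrupt-context entry bumps both counters, and the exit path undoes both, with the final sti deferred to the iret. A user-space sketch of that balance, with atomics and stubs assumed by me rather than taken from the macro expansion:

use std::sync::atomic::{AtomicU8, AtomicUsize, Ordering};

static INSIDE_INTERRUPT_COUNT: AtomicUsize = AtomicUsize::new(0);
static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0);

// Model stubs: count only; the real ones also run cli on 0 -> 1 (disable)
// and deliberately never run sti (decrement_lock_count).
unsafe fn disable_interrupts() { INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst); }
unsafe fn decrement_lock_count() { INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst); }

fn wrapper(interrupt_context: bool) {
    if interrupt_context {
        let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst);
        unsafe { disable_interrupts(); }   // counter goes up on entry
    }
    // ... dispatch to the actual handler ...
    if interrupt_context {
        let _ = INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst);
        unsafe { decrement_lock_count(); } // counter back down; the iret that
                                           // follows re-enables interrupts
    }
}

fn main() {
    wrapper(true);
    assert_eq!(INTERRUPT_DISABLE_COUNTER.load(Ordering::SeqCst), 0); // balanced
}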

@@ -1137,5 +1144,6 @@ pub unsafe fn init() {
(*idt).load();
}

// Paired with disable_interrupts in kernel common_start.
crate::sync::spin_lock_irq::enable_interrupts();
}
8 changes: 7 additions & 1 deletion kernel/src/i386/process_switch.rs
@@ -347,7 +347,13 @@ fn jump_to_entrypoint(ep: usize, userspace_stack_ptr: usize, arg1: usize, arg2:
const_assert_eq!((GdtIndex::UTlsRegion as u16) << 3 | 0b11, 0x3B);
const_assert_eq!((GdtIndex::UTlsElf as u16) << 3 | 0b11, 0x43);
const_assert_eq!((GdtIndex::UStack as u16) << 3 | 0b11, 0x4B);
// We're about to enable interrupts by doing an iret.


// Safety: This is paired with an undropped SpinLockIRQ guard (interrupt_manager) in scheduler::internal_schedule.
// (Normally that guard evens out with an identical one dropped in the same function by the next process;
// however, when a brand-new process starts, no such guard exists yet, so we must manually decrement
// the counter.)
// Additionally, an iret occurs later in this function, enabling interrupts.
unsafe { crate::sync::spin_lock_irq::decrement_lock_count(); }
unsafe {
asm!("
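A compact model of the first-run path this comment describes, with counter values assumed by me (the guard that a previously-scheduled thread would drop after switching back never existed for a brand-new process):

// User-space model, not kernel code.
fn main() {
    let mut counter: u8 = 0;

    counter += 1; // internal_schedule: interrupt_manager.lock() masks interrupts
    // process_switch() lands on the new thread's stack; the matching guard
    // drop that an already-scheduled thread would perform never happens.

    counter -= 1; // jump_to_entrypoint: decrement_lock_count(), deliberately no sti
    assert_eq!(counter, 0);

    // iret to userspace: the hardware reloads EFLAGS from the frame, setting IF,
    // so interrupts come back on with the counter already balanced.
}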
3 changes: 2 additions & 1 deletion kernel/src/main.rs
@@ -252,7 +252,8 @@ pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! {
init_cpu_locals(1);
// Now that cpu_locals are present, ensure that SpinLockIRQs
// are aware that interrupts are disabled.
sync::spin_lock_irq::disable_interrupts();
// Safety: paired with enable_interrupts in interrupt_service_routines::init
unsafe { sync::spin_lock_irq::disable_interrupts(); }

info!("Enabling interrupts");
unsafe { i386::interrupt_service_routines::init(); }
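Taken together with interrupt_service_routines::init above, the boot-time pairing works out as follows; a small model under the assumptions stated in the two safety comments:

// User-space model of the boot sequence, not kernel code.
fn main() {
    let mut counter: u8 = 0;

    // common_start, once cpu_locals exist: record that interrupts have been
    // disabled since early boot.
    counter += 1; // sync::spin_lock_irq::disable_interrupts()

    // ... i386::interrupt_service_routines::init() builds and loads the IDT ...

    counter -= 1; // enable_interrupts(): the 1 -> 0 transition executes sti
    assert_eq!(counter, 0);
}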
6 changes: 6 additions & 0 deletions kernel/src/scheduler.rs
@@ -257,6 +257,12 @@
// TODO: Ensure the global counter is <= 1

let interrupt_manager = SpinLockIRQ::new(());

// This is complicated: interrupt_lock is needed to disable interrupts while
// performing a process switch; however, it is not dropped until after a process_switch
// *back* to this process. Usually it is paired with the identical SpinLockIRQ guard dropped in
// 'process_b', but in the case of a brand-new process it is paired with a manual decrement
// before performing an iret to userspace.
let mut interrupt_lock = interrupt_manager.lock();

loop {
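For the common case (both processes already scheduled at least once), the guard taken here balances against the identical guard dropped on the other side; a sketch of that round trip, with counter values assumed:

// User-space model of an A -> B -> A switch, not kernel code.
fn main() {
    let mut counter: u8 = 0;

    counter += 1; // A: internal_schedule locks interrupt_manager (cli)
    // process_switch: A parks inside internal_schedule, B resumes there.

    counter -= 1; // B: drops the interrupt_lock *it* took before sleeping
    assert_eq!(counter, 0); // interrupts run normally while B executes

    counter += 1; // later, B's internal_schedule locks again and switches back
    counter -= 1; // A resumes and eventually drops the guard taken above
    assert_eq!(counter, 0);
}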
59 changes: 38 additions & 21 deletions kernel/src/sync/spin_lock_irq.rs
@@ -18,28 +18,39 @@ use crate::cpu_locals::ARE_CPU_LOCALS_INITIALIZED_YET;
/// # Description
///
/// Allows recursively disabling interrupts while keeping sane behavior.
/// Should only be manipulated through sync::enable_interrupts and
/// sync::disable_interrupts.
/// Should only be manipulated through [enable_interrupts],
/// [disable_interrupts], and [decrement_lock_count].
///
/// Used by the SpinLockIRQ to implement recursive irqsave logic.
#[thread_local]
static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0);
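As a self-contained illustration of the intended semantics, here is a user-space model of the counting scheme; cli/sti are print stand-ins of my own, and this is a sketch, not the kernel implementation:

use std::sync::atomic::{AtomicU8, Ordering};

static COUNTER: AtomicU8 = AtomicU8::new(0);

// Print stand-ins for interrupts::cli() / interrupts::sti().
fn cli() { println!("cli: interrupts masked"); }
fn sti() { println!("sti: interrupts unmasked"); }

fn disable_interrupts() {
    // Only the 0 -> 1 transition actually masks; deeper nesting just counts.
    if COUNTER.fetch_add(1, Ordering::SeqCst) == 0 { cli(); }
}

fn enable_interrupts() {
    // Only the 1 -> 0 transition actually unmasks.
    if COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 { sti(); }
}

fn decrement_lock_count() {
    // Drops the count but deliberately never runs sti; the caller is about to
    // iret, and the iret itself will restore EFLAGS.IF.
    let _ = COUNTER.fetch_sub(1, Ordering::SeqCst);
}

fn main() {
    disable_interrupts();   // cli
    disable_interrupts();   // nested: counts only
    enable_interrupts();    // still masked
    enable_interrupts();    // sti
    disable_interrupts();   // cli again
    decrement_lock_count(); // counter back to 0, but no sti
}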

/// Decrement the interrupt disable counter.
///
/// Look at documentation for ProcessStruct::pint_disable_counter to know more.
pub fn enable_interrupts() {
if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) {
if INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 {
unsafe { interrupts::sti() }
}
/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more.
///
/// # Unsafety:
///
/// Should be called in pairs with [disable_interrupts] or [decrement_lock_count],
/// otherwise the counter will get out of sync and deadlocks will likely occur.
pub unsafe fn enable_interrupts() {
if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 {
unsafe { interrupts::sti() }
}
}

/// Decrement the interrupt disable counter without possibly re-enabling interrupts.
/// Decrement the interrupt disable counter without re-enabling interrupts.
///
/// Used to decrement counter while keeping interrupts disabled before an iret.
/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more.
///
/// # Unsafety:
///
/// Used to decrement counter while keeping interrupts disabled inside an interrupt.
/// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more.
/// Should be called in pairs with [enable_interrupts],
/// otherwise the counter will get out of sync and deadlocks will likely occur.
///
/// Additionally, this should only be used when interrupts are about to be enabled anyway,
/// such as by an iret to userspace.
pub unsafe fn decrement_lock_count() {
if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) {
let _ = INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst);
@@ -48,12 +59,15 @@ pub unsafe fn decrement_lock_count() {

/// Increment the interrupt disable counter.
///
/// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more.
pub fn disable_interrupts() {
if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) {
if INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 {
unsafe { interrupts::cli() }
}
/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more.
///
/// # Unsafety:
///
/// Should be called in pairs with [enable_interrupts],
/// otherwise the counter will get out of sync and deadlocks will likely occur.
pub unsafe fn disable_interrupts() {
if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 {
unsafe { interrupts::cli() }
}
}

@@ -82,7 +96,7 @@ pub unsafe fn permanently_disable_interrupts() {
/// have been dropped.
///
/// Note that it is allowed to lock/unlock the locks in a different order. It uses
/// a global counter to disable/enable interrupts. View INTERRUPT_DISABLE_COUNTER
/// a global counter to disable/enable interrupts. View [INTERRUPT_DISABLE_COUNTER]
/// documentation for more information.
pub struct SpinLockIRQ<T: ?Sized> {
/// SpinLock we wrap.
@@ -106,7 +120,7 @@ impl<T> SpinLockIRQ<T> {
impl<T: ?Sized> SpinLockIRQ<T> {
/// Disables interrupts and locks the mutex.
pub fn lock(&self) -> SpinLockIRQGuard<'_, T> {
// Disable irqs
// Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIRQGuard.
unsafe { disable_interrupts(); }

// TODO: Disable preemption.
@@ -119,7 +133,8 @@

/// Disables interrupts and locks the mutex.
pub fn try_lock(&self) -> Option<SpinLockIRQGuard<'_, T>> {
// Disable irqs
// Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIRQGuard,
// or, in case a guard is not created, later in this function.
unsafe { disable_interrupts(); }

// TODO: Disable preemption.
@@ -130,6 +145,7 @@ impl<T: ?Sized> SpinLockIRQ<T> {
Some(internalguard) => Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard))),
None => {
// We couldn't lock. Restore irqs and return None
// Safety: Paired with disable_interrupts above in the case that a guard is not created.
unsafe { enable_interrupts(); }
None
}
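From the caller's side, the counter is balanced whether or not try_lock produces a guard, because the failure path above re-enables immediately; a sketch assuming some SpinLockIRQ<u32> is at hand:

fn poke(lock: &SpinLockIRQ<u32>) {
    if let Some(guard) = lock.try_lock() {
        // disable_interrupts ran; dropping the guard runs enable_interrupts.
        drop(guard);
    } else {
        // Failure path: try_lock already re-enabled above, so the counter
        // is exactly where it was before the call.
    }
    // Either way, every disable has been matched by an enable here.
}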
@@ -165,7 +181,8 @@ impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> {
// unlock
unsafe { ManuallyDrop::drop(&mut self.0); }

// Restore irq
// Safety: paired with disable_interrupts in SpinLockIRQ::{lock, try_lock}, which returns
// this guard to re-enable interrupts when it is dropped.
unsafe { enable_interrupts(); }

// TODO: Enable preempt
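Finally, the out-of-order unlocking documented on the struct falls straight out of the counting scheme; a sketch assuming two SpinLockIRQ<()> values:

fn interleaved(a: &SpinLockIRQ<()>, b: &SpinLockIRQ<()>) {
    let ga = a.lock(); // counter 0 -> 1: cli
    let gb = b.lock(); // counter 1 -> 2
    drop(ga);          // counter 2 -> 1: interrupts stay masked
    drop(gb);          // counter 1 -> 0: sti
}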
