diff --git a/kernel/src/i386/interrupt_service_routines.rs b/kernel/src/i386/interrupt_service_routines.rs index 612476ab4..32e68c5bd 100644 --- a/kernel/src/i386/interrupt_service_routines.rs +++ b/kernel/src/i386/interrupt_service_routines.rs @@ -28,7 +28,6 @@ //! [syscall_interrupt_dispatcher]: self::interrupt_service_routines::syscall_interrupt_dispatcher use crate::i386::structures::idt::{PageFaultErrorCode, Idt}; -use crate::i386::instructions::interrupts::sti; use crate::mem::VirtualAddress; use crate::paging::kernel_memory::get_kernel_memory; use crate::i386::PrivilegeLevel; @@ -65,6 +64,10 @@ pub fn check_thread_killed() { if scheduler::get_current_thread().state.load(Ordering::SeqCst) == ThreadState::TerminationPending { let lock = SpinLockIRQ::new(()); loop { // in case of spurious wakeups + // TODO: Is it valid for interrupts to be active here? + // BODY: The interrupt wrappers call decrement_lock_count before calling this function, + // BODY: because it has the possibility to never return. + // BODY: Upon a spurious wake up, interrupts will end up being enabled. let _ = scheduler::unschedule(&lock, lock.lock()); } } @@ -333,7 +336,7 @@ macro_rules! trap_gate_asm { /// kernel_fault_strategy: panic, // what to do if we were in kernelspace when this interruption happened. /// user_fault_strategy: panic, // what to do if we were in userspace when this interruption happened, and feature "panic-on-exception" is enabled. /// handler_strategy: kill, // what to for this interrupt otherwise -/// interrupt_context: true // OPTIONAL: basically: are IRQs disabled. True by default, false used for syscalls. +/// type: exception // what "type" of interrupt this is ///); /// ``` /// @@ -346,6 +349,10 @@ macro_rules! trap_gate_asm { /// * `ignore`: don't do anything for this interrupt. /// * `kill`: kills the process in which this interrupt originated. /// * `my_handler_func`: calls `my_handler_func` to handle this interrupt. 
Useful if you want to override a standard strategy. +/// * The possible values for `type` are: +/// * `syscall`: interrupts are enabled, and usage of regular SpinLocks are allowed. +/// * `exception`: interrupts are disabled, but regular SpinLocks can be accessed if the exception comes from userspace. +/// * `interrupt`: interrupts and usage of regular SpinLocks are disabled. /// /// When providing a custom function as strategy, the function must be of signature: /// @@ -371,6 +378,12 @@ macro_rules! trap_gate_asm { /// /// extern "C" fn $wrapper_rust_fnname(userspace_context: &mut UserspaceHardwareContext) { /// +/// disable_interrupts() // +/// // type +/// if coming from Ring == 0 { // (here: exception) +/// disable_spinlocks() // +/// } // +/// /// if coming from Ring == 0 { /// /// kernel_panic(&PanicOrigin::KernelFault { // @@ -407,6 +420,12 @@ macro_rules! trap_gate_asm { /// ProcessStruct::kill_current_process(); // /// } /// +/// enable_interrupts() // +/// // type +/// if coming from Ring == 0 { // (here: exception) +/// enable_spinlocks() // +/// } // +/// /// // if we're returning to userspace, check we haven't been killed /// if comming from Ring == 3 { /// check_thread_killed(); @@ -524,26 +543,53 @@ macro_rules! generate_trap_gate_handler { } }; - // handle optional argument interrupt_context. 
- ( - name: $exception_name:literal, - has_errcode: $has_errcode:ident, - wrapper_asm_fnname: $wrapper_asm_fnname:ident, - wrapper_rust_fnname: $wrapper_rust_fnname:ident, - kernel_fault_strategy: $kernel_fault_strategy:ident, - user_fault_strategy: $user_fault_strategy:ident, - handler_strategy: $handler_strategy:ident - ) => { - generate_trap_gate_handler!( - name: $exception_name, - has_errcode: $has_errcode, - wrapper_asm_fnname: $wrapper_asm_fnname, - wrapper_rust_fnname: $wrapper_rust_fnname, - kernel_fault_strategy: $kernel_fault_strategy, - user_fault_strategy: $user_fault_strategy, - handler_strategy: $handler_strategy, - interrupt_context: true - ); + /* Counter manipulation */ + + // syscall + (__gen increment syscall; $_privlvl:path) => { + /* ignored -- interrupts are enabled, spinlocks are enabled */ + }; + (__gen decrement syscall; $_privlvl:path) => { + /* ignored -- interrupts are enabled, spinlocks are enabled */ + }; + + // exception + (__gen increment exception; $privlvl:path) => { + unsafe { + // Safety: Paired with decrement_lock_count in (__gen decrement exception; _) + crate::sync::spin_lock_irq::disable_interrupts(); + } + if $privlvl == PrivilegeLevel::Ring0 { + crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.fetch_add(1, core::sync::atomic::Ordering::SeqCst); + } + + }; + (__gen decrement exception; $privlvl:path) => { + unsafe { + // Safety: Paired with disable_interrupts in (__gen increment exception; _) + // Additionally, this is called shortly before an iret occurs, inside the asm wrapper. 
+ crate::sync::spin_lock_irq::decrement_lock_count(); + } + if $privlvl == PrivilegeLevel::Ring0 { + crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.fetch_sub(1, core::sync::atomic::Ordering::SeqCst); + } + }; + + // interrupt + (__gen increment interrupt; $_privlvl:path) => { + unsafe { + // Safety: Paired with decrement_lock_count in (__gen decrement interrupt; _) + crate::sync::spin_lock_irq::disable_interrupts(); + } + crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.fetch_add(1, core::sync::atomic::Ordering::SeqCst); + }; + (__gen decrement interrupt; $_privlvl:path) => { + unsafe { + // Safety: Paired with disable_interrupts in (__gen increment interrupt; _) + // Additionally, this is called shortly before an iret occurs, inside the asm wrapper. + crate::sync::spin_lock_irq::decrement_lock_count(); + } + crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.fetch_sub(1, core::sync::atomic::Ordering::SeqCst); }; /* The full wrapper */ @@ -557,23 +603,21 @@ macro_rules! generate_trap_gate_handler { kernel_fault_strategy: $kernel_fault_strategy:ident, user_fault_strategy: $user_fault_strategy:ident, handler_strategy: $handler_strategy:ident, - interrupt_context: $interrupt_context:literal + type: $type:ident ) => { generate_trap_gate_handler!(__gen asm_wrapper; $wrapper_asm_fnname, $wrapper_rust_fnname, $has_errcode); /// Auto generated function. See [generate_trap_gate_handler]. 
extern "C" fn $wrapper_rust_fnname(userspace_context: &mut UserspaceHardwareContext) { - use crate::i386::structures::gdt::SegmentSelector; - use crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT; - use core::sync::atomic::Ordering; - if $interrupt_context { - let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst); - } + let privilege_level = SegmentSelector(userspace_context.cs as u16).rpl(); - if let PrivilegeLevel::Ring0 = SegmentSelector(userspace_context.cs as u16).rpl() { + // mark interrupts and spinlocks disabled as apporpriate + generate_trap_gate_handler!(__gen increment $type; privilege_level); + + if let PrivilegeLevel::Ring0 = privilege_level { generate_trap_gate_handler!(__gen kernel_fault; name: $exception_name, userspace_context, errcode: $has_errcode, strategy: $kernel_fault_strategy); } else { // we come from userspace, backup the hardware context in the thread struct @@ -590,13 +634,12 @@ macro_rules! generate_trap_gate_handler { // call the handler generate_trap_gate_handler!(__gen handler; name: $exception_name, userspace_context, errcode: $has_errcode, strategy: $handler_strategy); - if $interrupt_context { - let _ = INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst); - } + // re-enable interrupts and spinlocks as appropriate + generate_trap_gate_handler!(__gen decrement $type; privilege_level); // if we're returning to userspace, check we haven't been killed if let PrivilegeLevel::Ring3 = SegmentSelector(userspace_context.cs as u16).rpl() { - check_thread_killed(); + check_thread_killed() } } }; @@ -612,7 +655,8 @@ generate_trap_gate_handler!(name: "Divide Error Exception", wrapper_rust_fnname: divide_by_zero_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Debug Exception", @@ -621,7 +665,8 @@ generate_trap_gate_handler!(name: "Debug Exception", wrapper_rust_fnname: 
debug_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "An unexpected non-maskable (but still kinda maskable) interrupt occurred", @@ -630,7 +675,8 @@ generate_trap_gate_handler!(name: "An unexpected non-maskable (but still kinda m wrapper_rust_fnname: nmi_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "Breakpoint Exception", @@ -639,7 +685,8 @@ generate_trap_gate_handler!(name: "Breakpoint Exception", wrapper_rust_fnname: breakpoint_exception_rust_wrapper, kernel_fault_strategy: ignore, user_fault_strategy: ignore, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "Overflow Exception", @@ -648,7 +695,8 @@ generate_trap_gate_handler!(name: "Overflow Exception", wrapper_rust_fnname: overflow_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "BOUND Range Exceeded Exception", @@ -657,7 +705,8 @@ generate_trap_gate_handler!(name: "BOUND Range Exceeded Exception", wrapper_rust_fnname: bound_range_exceeded_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Invalid opcode Exception", @@ -666,7 +715,8 @@ generate_trap_gate_handler!(name: "Invalid opcode Exception", wrapper_rust_fnname: invalid_opcode_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Device Not Available Exception", @@ -675,7 +725,8 @@ 
generate_trap_gate_handler!(name: "Device Not Available Exception", wrapper_rust_fnname: device_not_available_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); /// Double fault handler. Panics the kernel unconditionally. @@ -691,7 +742,8 @@ generate_trap_gate_handler!(name: "Invalid TSS Exception", wrapper_rust_fnname: invalid_tss_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "Segment Not Present Exception", @@ -700,7 +752,8 @@ generate_trap_gate_handler!(name: "Segment Not Present Exception", wrapper_rust_fnname: segment_not_present_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Stack Fault Exception", @@ -709,7 +762,8 @@ generate_trap_gate_handler!(name: "Stack Fault Exception", wrapper_rust_fnname: stack_fault_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "General Protection Fault Exception", @@ -718,7 +772,8 @@ generate_trap_gate_handler!(name: "General Protection Fault Exception", wrapper_rust_fnname: general_protection_fault_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Page Fault Exception", @@ -727,7 +782,8 @@ generate_trap_gate_handler!(name: "Page Fault Exception", wrapper_rust_fnname: page_fault_exception_rust_wrapper, kernel_fault_strategy: kernel_page_fault_panic, user_fault_strategy: user_page_fault_panic, - handler_strategy: user_page_fault_handler + handler_strategy: 
user_page_fault_handler, + type: exception ); /// Overriding the default panic strategy so we can display cr2 @@ -772,7 +828,8 @@ generate_trap_gate_handler!(name: "x87 FPU floating-point error", wrapper_rust_fnname: x87_floating_point_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Alignment Check Exception", @@ -781,7 +838,8 @@ generate_trap_gate_handler!(name: "Alignment Check Exception", wrapper_rust_fnname: alignment_check_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Machine-Check Exception", @@ -790,7 +848,8 @@ generate_trap_gate_handler!(name: "Machine-Check Exception", wrapper_rust_fnname: machinee_check_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "SIMD Floating-Point Exception", @@ -799,7 +858,8 @@ generate_trap_gate_handler!(name: "SIMD Floating-Point Exception", wrapper_rust_fnname: simd_floating_point_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Virtualization Exception", @@ -808,7 +868,8 @@ generate_trap_gate_handler!(name: "Virtualization Exception", wrapper_rust_fnname: virtualization_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Security Exception", @@ -817,7 +878,8 @@ generate_trap_gate_handler!(name: "Security Exception", wrapper_rust_fnname: security_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - 
handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "Syscall Interrupt", @@ -827,7 +889,7 @@ generate_trap_gate_handler!(name: "Syscall Interrupt", kernel_fault_strategy: panic, // you aren't expected to syscall from the kernel user_fault_strategy: ignore, // don't worry it's fine ;) handler_strategy: syscall_interrupt_dispatcher, - interrupt_context: false + type: syscall ); impl UserspaceHardwareContext { @@ -1041,7 +1103,8 @@ macro_rules! irq_handler { wrapper_rust_fnname: $rust_wrapper_name, kernel_fault_strategy: ignore, // irqs can happen while we're in kernel mode, don't worry, it's fine ;) user_fault_strategy: ignore, // don't worry it's fine ;) - handler_strategy: $handler_name + handler_strategy: $handler_name, + type: interrupt ); )* @@ -1134,5 +1197,6 @@ pub unsafe fn init() { (*idt).load(); } - sti(); + // Paired with disable_interrupts in kernel common_start. + crate::sync::spin_lock_irq::enable_interrupts(); } diff --git a/kernel/src/i386/process_switch.rs b/kernel/src/i386/process_switch.rs index e47c603a6..c75434e28 100644 --- a/kernel/src/i386/process_switch.rs +++ b/kernel/src/i386/process_switch.rs @@ -347,6 +347,17 @@ fn jump_to_entrypoint(ep: usize, userspace_stack_ptr: usize, arg1: usize, arg2: const_assert_eq!((GdtIndex::UTlsRegion as u16) << 3 | 0b11, 0x3B); const_assert_eq!((GdtIndex::UTlsElf as u16) << 3 | 0b11, 0x43); const_assert_eq!((GdtIndex::UStack as u16) << 3 | 0b11, 0x4B); + + + unsafe { + // Safety: This is paired with an undropped SpinLockIrq (interrupt_manager) in scheduler::internal_schedule. + // (Normally, this SpinLockIrq evens out with an identical one in the same function in the new process, + // however, when a new process starts this object is not present, therefore we must manually decrement + // the counter.) + // Additionally, an iret occurs later in this function, enabling interrupts. 
+ crate::sync::spin_lock_irq::decrement_lock_count(); + } + unsafe { asm!(" mov ax,0x33 // ds, es <- UData, Ring 3 diff --git a/kernel/src/i386/structures/idt.rs b/kernel/src/i386/structures/idt.rs index e3f839a1f..7077345bd 100644 --- a/kernel/src/i386/structures/idt.rs +++ b/kernel/src/i386/structures/idt.rs @@ -584,7 +584,7 @@ impl IdtEntry { /// Set a task gate for the IDT entry and sets the present bit. /// - /// # Unsafety + /// # Safety /// /// `tss_selector` must point to a valid TSS, which will remain present. /// The TSS' `eip` should point to the handler function. diff --git a/kernel/src/main.rs b/kernel/src/main.rs index acf67bd9a..a8c6e9369 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -250,6 +250,12 @@ pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! { info!("Allocating cpu_locals"); init_cpu_locals(1); + // Now that cpu_locals are present, ensure that SpinLockIRQs + // are aware that interrupts are disabled. + unsafe { + // Safety: paired with enable_interrupts in interrupt_service_routines::init + sync::spin_lock_irq::disable_interrupts(); + } info!("Enabling interrupts"); unsafe { i386::interrupt_service_routines::init(); } diff --git a/kernel/src/paging/hierarchical_table.rs b/kernel/src/paging/hierarchical_table.rs index 48286cbd5..82931b09e 100644 --- a/kernel/src/paging/hierarchical_table.rs +++ b/kernel/src/paging/hierarchical_table.rs @@ -648,7 +648,7 @@ pub trait InactiveHierarchyTrait : TableHierarchy { /// These frames were marked as occupied when initialising the `FrameAllocator`, /// we're making them available again. /// - /// # Unsafety + /// # Safety /// /// Having multiple InactiveHierarchy pointing to the same table hierarchy is unsafe. /// Should not be used for any other purpose, it is only guaranteed to be safe to drop. 
diff --git a/kernel/src/process.rs b/kernel/src/process.rs index a3272d2d4..9b59d0e10 100644 --- a/kernel/src/process.rs +++ b/kernel/src/process.rs @@ -178,6 +178,7 @@ pub struct ThreadStruct { /// /// This is used when signaling that this thread as exited. state_event: ThreadStateEvent + } /// A handle to a userspace-accessible resource. diff --git a/kernel/src/process/thread_local_storage.rs b/kernel/src/process/thread_local_storage.rs index 78b46bd50..b38760ad7 100644 --- a/kernel/src/process/thread_local_storage.rs +++ b/kernel/src/process/thread_local_storage.rs @@ -165,7 +165,7 @@ impl TLSManager { /// Mark this TLS as free, so it can be re-used by future spawned thread. /// - /// # Unsafety + /// # Safety /// /// The TLS will be reassigned, so it must never be used again after calling this function. /// diff --git a/kernel/src/scheduler.rs b/kernel/src/scheduler.rs index b8c4c353e..8998c16bb 100644 --- a/kernel/src/scheduler.rs +++ b/kernel/src/scheduler.rs @@ -71,7 +71,7 @@ pub fn get_current_process() -> Arc { /// The passed function will be executed after setting the CURRENT_THREAD, but before /// setting it back to the RUNNING state. /// -/// # Unsafety +/// # Safety /// /// Interrupts must be disabled when calling this function. It will mutably borrow [`CURRENT_THREAD`], /// so we can't have interrupts on top of that which try to access it while it is borrowed mutably by us, @@ -257,6 +257,12 @@ where // TODO: Ensure the global counter is <= 1 let interrupt_manager = SpinLockIRQ::new(()); + + // This is complicated: interrupt_lock is needed to disable interrupts while + // performing a process switch: however, it is not dropped until after a process_switch + // *back* to this process. Usually it is paired with the SpinLockIrq being dropped in + // 'process_b', but in the case of a brand new process, it is paired with a manual decrement + // before performing an iret to userspace. 
let mut interrupt_lock = interrupt_manager.lock(); loop { @@ -333,7 +339,7 @@ where /// The passed function should take care to change the protection level, and ensure it cleans up all /// the registers before calling the EIP, in order to avoid leaking information to userspace. /// -/// # Unsafety: +/// # Safety: /// /// Interrupts must be off when calling this function. It will set [`CURRENT_THREAD`], and then /// turn them on, as we are running a new thread, no SpinLockIRQ is held. diff --git a/kernel/src/sync/spin_lock.rs b/kernel/src/sync/spin_lock.rs index 28f707975..0b5f9b4db 100644 --- a/kernel/src/sync/spin_lock.rs +++ b/kernel/src/sync/spin_lock.rs @@ -148,7 +148,10 @@ impl SpinLock { /// a guard within Some. pub fn try_lock(&self) -> Option> { use core::sync::atomic::Ordering; - if crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.load(Ordering::SeqCst) != 0 { + use crate::cpu_locals::ARE_CPU_LOCALS_INITIALIZED_YET; + use crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT; + use super::INTERRUPT_DISARM; + if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INSIDE_INTERRUPT_COUNT.load(Ordering::SeqCst) != 0 { panic!("\ You have attempted to lock a spinlock in interrupt context. \ This is most likely a design flaw. \ diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs index d97788e97..2858ae031 100644 --- a/kernel/src/sync/spin_lock_irq.rs +++ b/kernel/src/sync/spin_lock_irq.rs @@ -9,8 +9,68 @@ use spin::{Mutex as SpinLock, MutexGuard as SpinLockGuard}; use core::fmt; use core::mem::ManuallyDrop; use core::ops::{Deref, DerefMut}; -use core::sync::atomic::Ordering; +use core::sync::atomic::{AtomicU8, Ordering}; use super::INTERRUPT_DISARM; +use crate::cpu_locals::ARE_CPU_LOCALS_INITIALIZED_YET; + +/// Interrupt disable counter. +/// +/// # Description +/// +/// Allows recursively disabling interrupts while keeping a sane behavior. 
+/// Should only be manipulated through [enable_interrupts], +/// [disable_interrupts], and [decrement_lock_count]. +/// +/// Used by the SpinLockIRQ to implement recursive irqsave logic. +#[thread_local] +static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0); + +/// Decrement the interrupt disable counter. +/// +/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more. +/// +/// # Safety +/// +/// Should be called in pairs with [disable_interrupts], +/// otherwise the counter will get out of sync and deadlocks will likely occur. +pub unsafe fn enable_interrupts() { + if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 { + unsafe { interrupts::sti() } + } +} + +/// Decrement the interrupt disable counter without re-enabling interrupts. +/// +/// Used to decrement counter while keeping interrupts disabled before an iret. +/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more. +/// +/// # Safety +/// +/// Should be called in pairs with [disable_interrupts], +/// otherwise the counter will get out of sync and deadlocks will likely occur. +/// +/// Additionally, this should only be used when interrupts are about to be enabled anyway, +/// such as by an iret to userspace. +pub unsafe fn decrement_lock_count() { + if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) { + let _ = INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst); + } +} + +/// Increment the interrupt disable counter. +/// +/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more. +/// +/// # Safety +/// +/// Should be called in pairs with [enable_interrupts] or [decrement_lock_count], +/// otherwise the counter will get out of sync and deadlocks will likely occur. 
+pub unsafe fn disable_interrupts() { + if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 { + unsafe { interrupts::cli() } + } +} + /// Permanently disables the interrupts. Forever. /// @@ -32,27 +92,12 @@ pub unsafe fn permanently_disable_interrupts() { /// - `lock` behaves like a `spinlock_irqsave`. It returns a guard. /// - Dropping the guard behaves like `spinlock_irqrestore` /// -/// This means that locking a spinlock disables interrupts until all spinlock -/// guards have been dropped. -/// -/// A note on reordering: reordering lock drops is prohibited and doing so will -/// result in UB. -// -// TODO: Find sane design for SpinLockIRQ safety -// BODY: Currently, SpinLockIRQ API is unsound. If the guards are dropped in -// BODY: the wrong order, it may cause IF to be reset too early. -// BODY: -// BODY: Ideally, we would need a way to prevent the guard variable to be -// BODY: reassigned. AKA: prevent moving. Note that this is different from what -// BODY: the Pin API solves. The Pin API is about locking a variable in one -// BODY: memory location, but its binding may still be moved and dropped. -// BODY: Unfortunately, Rust does not have a way to express that a value cannot -// BODY: be reassigned. -// BODY: -// BODY: Another possibility would be to switch to a callback API. This would -// BODY: solve the problem, but the scheduler would be unable to consume such -// BODY: locks. Maybe we could have an unsafe "scheduler_relock" function that -// BODY: may only be called from the scheduler? +/// This means that locking a spinlock disables interrupts until all spinlocks +/// have been dropped. +/// +/// Note that it is allowed to lock/unlock the locks in a different order. It uses +/// a global counter to disable/enable interrupts. View [INTERRUPT_DISABLE_COUNTER] +/// documentation for more information. 
pub struct SpinLockIRQ { /// SpinLock we wrap. internal: SpinLock @@ -74,44 +119,39 @@ impl SpinLockIRQ { impl SpinLockIRQ { /// Disables interrupts and locks the mutex. - pub fn lock(&self) -> SpinLockIRQGuard { - if INTERRUPT_DISARM.load(Ordering::SeqCst) { - let internalguard = self.internal.lock(); - SpinLockIRQGuard(ManuallyDrop::new(internalguard), false) - } else { - // Save current interrupt state. - let saved_intpt_flag = interrupts::are_enabled(); - - // Disable interruptions - unsafe { interrupts::cli(); } - - let internalguard = self.internal.lock(); - SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag) + pub fn lock(&self) -> SpinLockIRQGuard<'_, T> { + unsafe { + // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrqGuard. + disable_interrupts(); } + + // TODO: Disable preemption. + // TODO: Spin acquire + + // lock + let internalguard = self.internal.lock(); + SpinLockIRQGuard(ManuallyDrop::new(internalguard)) } /// Disables interrupts and locks the mutex. - pub fn try_lock(&self) -> Option> { - if INTERRUPT_DISARM.load(Ordering::SeqCst) { - self.internal.try_lock() - .map(|v| SpinLockIRQGuard(ManuallyDrop::new(v), false)) - } else { - // Save current interrupt state. - let saved_intpt_flag = interrupts::are_enabled(); - - // Disable interruptions - unsafe { interrupts::cli(); } - - // Lock spinlock - let internalguard = self.internal.try_lock(); - - if let Some(internalguard) = internalguard { - // if lock is successful, return guard. - Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag)) - } else { - // Else, restore interrupt state - if saved_intpt_flag { - unsafe { interrupts::sti(); } + pub fn try_lock(&self) -> Option> { + unsafe { + // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrq, + // or in case a guard is not created, later in this function. + disable_interrupts(); + } + + // TODO: Disable preemption. 
+ // TODO: Spin acquire + + // lock + match self.internal.try_lock() { + Some(internalguard) => Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard))), + None => { + // We couldn't lock. Restore irqs and return None + unsafe { + // Safety: Paired with disable_interrupts above in the case that a guard is not created. + enable_interrupts(); } None } @@ -126,19 +166,20 @@ impl SpinLockIRQ { impl fmt::Debug for SpinLockIRQ { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(v) = self.try_lock() { - f.debug_struct("SpinLockIRQ") - .field("data", &v) - .finish() - } else { - write!(f, "SpinLockIRQ {{ }}") + match self.try_lock() { + Some(d) => { + write!(f, "SpinLockIRQ {{ data: ")?; + d.fmt(f)?; + write!(f, "}}") + }, + None => write!(f, "SpinLockIRQ {{ }}") } } } /// The SpinLockIrq lock guard. #[derive(Debug)] -pub struct SpinLockIRQGuard<'a, T: ?Sized>(ManuallyDrop>, bool); +pub struct SpinLockIRQGuard<'a, T: ?Sized>(ManuallyDrop>); impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> { fn drop(&mut self) { @@ -146,9 +187,10 @@ impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> { // unlock unsafe { ManuallyDrop::drop(&mut self.0); } - // Restore irq - if self.1 { - unsafe { interrupts::sti(); } + unsafe { + // Safety: paired with disable_interrupts in SpinLockIRQ::{lock, try_lock}, which returns + // this guard to re-enable interrupts when it is dropped. + enable_interrupts(); } // TODO: Enable preempt