From 20ad4b808c29f2d0ef067159e1a3f0e29d305bab Mon Sep 17 00:00:00 2001 From: Kitlith Date: Sat, 26 Oct 2019 19:41:51 -0700 Subject: [PATCH 01/13] Revert "Go back to backup/restore SpinLockIRQ system" This reverts commit e850a8908338a52d5f8e185a5bd914f669b615e1. --- kernel/src/process.rs | 16 +++- kernel/src/sync/spin_lock_irq.rs | 134 ++++++++++++++++--------------- 2 files changed, 85 insertions(+), 65 deletions(-) diff --git a/kernel/src/process.rs b/kernel/src/process.rs index a3272d2d4..8a71ba482 100644 --- a/kernel/src/process.rs +++ b/kernel/src/process.rs @@ -177,7 +177,19 @@ pub struct ThreadStruct { /// Thread state event /// /// This is used when signaling that this thread as exited. - state_event: ThreadStateEvent + state_event: ThreadStateEvent, + + /// Interrupt disable counter. + /// + /// # Description + /// + /// Allows recursively disabling interrupts while keeping a sane behavior. + /// Should only be manipulated through sync::enable_interrupts and + /// sync::disable_interrupts. + /// + /// Used by the SpinLockIRQ to implement recursive irqsave logic. + pub int_disable_counter: AtomicUsize, + } /// A handle to a userspace-accessible resource. @@ -793,6 +805,7 @@ impl ThreadStruct { state, kstack, hwcontext : empty_hwcontext, + int_disable_counter: AtomicUsize::new(0), process: Arc::clone(belonging_process), tls_region: tls, tls_elf: SpinLock::new(VirtualAddress(0x00000000)), @@ -891,6 +904,7 @@ impl ThreadStruct { state, kstack, hwcontext, + int_disable_counter: AtomicUsize::new(0), process: Arc::clone(&process), tls_region: tls, tls_elf: SpinLock::new(VirtualAddress(0x00000000)), diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs index d97788e97..379adea1e 100644 --- a/kernel/src/sync/spin_lock_irq.rs +++ b/kernel/src/sync/spin_lock_irq.rs @@ -11,6 +11,41 @@ use core::mem::ManuallyDrop; use core::ops::{Deref, DerefMut}; use core::sync::atomic::Ordering; use super::INTERRUPT_DISARM; +use crate::scheduler; + + +/// Decrement the interrupt disable counter. +/// +/// Look at documentation for ProcessStruct::pint_disable_counter to know more. +fn enable_interrupts() { + if !INTERRUPT_DISARM.load(Ordering::SeqCst) { + if let Some(thread) = scheduler::try_get_current_thread() { + if thread.int_disable_counter.fetch_sub(1, Ordering::SeqCst) == 1 { + unsafe { interrupts::sti() } + } + } else { + // TODO: Safety??? + // don't do anything. + } + } +} + +/// Increment the interrupt disable counter. +/// +/// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more. +fn disable_interrupts() { + if !INTERRUPT_DISARM.load(Ordering::SeqCst) { + if let Some(thread) = scheduler::try_get_current_thread() { + if thread.int_disable_counter.fetch_add(1, Ordering::SeqCst) == 0 { + unsafe { interrupts::cli() } + } + } else { + // TODO: Safety??? + // don't do anything. + } + } +} + /// Permanently disables the interrupts. Forever. /// @@ -32,27 +67,12 @@ pub unsafe fn permanently_disable_interrupts() { /// - `lock` behaves like a `spinlock_irqsave`. It returns a guard. /// - Dropping the guard behaves like `spinlock_irqrestore` /// -/// This means that locking a spinlock disables interrupts until all spinlock -/// guards have been dropped. +/// This means that locking a spinlock disables interrupts until all spinlocks +/// have been dropped. /// -/// A note on reordering: reordering lock drops is prohibited and doing so will -/// result in UB. -// -// TODO: Find sane design for SpinLockIRQ safety -// BODY: Currently, SpinLockIRQ API is unsound. 
If the guards are dropped in
-// BODY: the wrong order, it may cause IF to be reset too early.
-// BODY:
-// BODY: Ideally, we would need a way to prevent the guard variable to be
-// BODY: reassigned. AKA: prevent moving. Note that this is different from what
-// BODY: the Pin API solves. The Pin API is about locking a variable in one
-// BODY: memory location, but its binding may still be moved and dropped.
-// BODY: Unfortunately, Rust does not have a way to express that a value cannot
-// BODY: be reassigned.
-// BODY:
-// BODY: Another possibility would be to switch to a callback API. This would
-// BODY: solve the problem, but the scheduler would be unable to consume such
-// BODY: locks. Maybe we could have an unsafe "scheduler_relock" function that
-// BODY: may only be called from the scheduler?
+/// Note that it is allowed to lock/unlock the locks in a different order. It uses
+/// a global counter to disable/enable interrupts. View INTERRUPT_DISABLE_COUNTER
+/// documentation for more information.
 pub struct SpinLockIRQ<T: ?Sized> {
     /// SpinLock we wrap.
     internal: SpinLock<T>
@@ -74,45 +94,32 @@ impl<T> SpinLockIRQ<T> {

 impl<T: ?Sized> SpinLockIRQ<T> {
     /// Disables interrupts and locks the mutex.
-    pub fn lock(&self) -> SpinLockIRQGuard<T> {
-        if INTERRUPT_DISARM.load(Ordering::SeqCst) {
-            let internalguard = self.internal.lock();
-            SpinLockIRQGuard(ManuallyDrop::new(internalguard), false)
-        } else {
-            // Save current interrupt state.
-            let saved_intpt_flag = interrupts::are_enabled();
+    pub fn lock(&self) -> SpinLockIRQGuard<'_, T> {
+        // Disable irqs
+        unsafe { disable_interrupts(); }

-            // Disable interruptions
-            unsafe { interrupts::cli(); }
+        // TODO: Disable preemption.
+        // TODO: Spin acquire

-            let internalguard = self.internal.lock();
-            SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag)
-        }
+        // lock
+        let internalguard = self.internal.lock();
+        SpinLockIRQGuard(ManuallyDrop::new(internalguard))
     }

     /// Disables interrupts and locks the mutex.
-    pub fn try_lock(&self) -> Option<SpinLockIRQGuard<T>> {
-        if INTERRUPT_DISARM.load(Ordering::SeqCst) {
-            self.internal.try_lock()
-                .map(|v| SpinLockIRQGuard(ManuallyDrop::new(v), false))
-        } else {
-            // Save current interrupt state.
-            let saved_intpt_flag = interrupts::are_enabled();
-
-            // Disable interruptions
-            unsafe { interrupts::cli(); }
-
-            // Lock spinlock
-            let internalguard = self.internal.try_lock();
-
-            if let Some(internalguard) = internalguard {
-                // if lock is successful, return guard.
-                Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag))
-            } else {
-                // Else, restore interrupt state
-                if saved_intpt_flag {
-                    unsafe { interrupts::sti(); }
-                }
+    pub fn try_lock(&self) -> Option<SpinLockIRQGuard<'_, T>> {
+        // Disable irqs
+        unsafe { disable_interrupts(); }
+
+        // TODO: Disable preemption.
+        // TODO: Spin acquire
+
+        // lock
+        match self.internal.try_lock() {
+            Some(internalguard) => Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard))),
+            None => {
+                // We couldn't lock. Restore irqs and return None
+                unsafe { enable_interrupts(); }
                 None
             }
         }
@@ -126,19 +133,20 @@ impl<T: ?Sized> SpinLockIRQ<T> {

 impl<T: fmt::Debug> fmt::Debug for SpinLockIRQ<T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        if let Some(v) = self.try_lock() {
-            f.debug_struct("SpinLockIRQ")
-                .field("data", &v)
-                .finish()
-        } else {
-            write!(f, "SpinLockIRQ {{ }}")
+        match self.try_lock() {
+            Some(d) => {
+                write!(f, "SpinLockIRQ {{ data: ")?;
+                d.fmt(f)?;
+                write!(f, "}}")
+            },
+            None => write!(f, "SpinLockIRQ {{ }}")
         }
     }
 }

 /// The SpinLockIrq lock guard.
#[derive(Debug)] -pub struct SpinLockIRQGuard<'a, T: ?Sized>(ManuallyDrop>, bool); +pub struct SpinLockIRQGuard<'a, T: ?Sized>(ManuallyDrop>); impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> { fn drop(&mut self) { @@ -147,9 +155,7 @@ impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> { unsafe { ManuallyDrop::drop(&mut self.0); } // Restore irq - if self.1 { - unsafe { interrupts::sti(); } - } + unsafe { enable_interrupts(); } // TODO: Enable preempt } From 6efa7eaa6ac1c5723f678dc36c036533e4108187 Mon Sep 17 00:00:00 2001 From: Kitlith Date: Sun, 27 Oct 2019 12:16:10 -0700 Subject: [PATCH 02/13] Don't re-enable interrupts when in interrupt context. --- kernel/src/sync/spin_lock_irq.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs index 379adea1e..f6ed4be38 100644 --- a/kernel/src/sync/spin_lock_irq.rs +++ b/kernel/src/sync/spin_lock_irq.rs @@ -18,10 +18,13 @@ use crate::scheduler; /// /// Look at documentation for ProcessStruct::pint_disable_counter to know more. fn enable_interrupts() { + use crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT; if !INTERRUPT_DISARM.load(Ordering::SeqCst) { if let Some(thread) = scheduler::try_get_current_thread() { if thread.int_disable_counter.fetch_sub(1, Ordering::SeqCst) == 1 { - unsafe { interrupts::sti() } + if INSIDE_INTERRUPT_COUNT.load(Ordering::SeqCst) == 0 { + unsafe { interrupts::sti() } + } } } else { // TODO: Safety??? From de689010bed3f05b3fbe7248e792077e2a247354 Mon Sep 17 00:00:00 2001 From: Kitlith Date: Sun, 27 Oct 2019 16:04:36 -0700 Subject: [PATCH 03/13] Increment and decrement INTERRUPT_DISABLE_COUNTER in irqs. --- kernel/src/i386/interrupt_service_routines.rs | 3 ++ kernel/src/process.rs | 15 +----- kernel/src/sync/spin_lock_irq.rs | 49 +++++++++++-------- 3 files changed, 32 insertions(+), 35 deletions(-) diff --git a/kernel/src/i386/interrupt_service_routines.rs b/kernel/src/i386/interrupt_service_routines.rs index 612476ab4..c00342069 100644 --- a/kernel/src/i386/interrupt_service_routines.rs +++ b/kernel/src/i386/interrupt_service_routines.rs @@ -567,10 +567,12 @@ macro_rules! generate_trap_gate_handler { use crate::i386::structures::gdt::SegmentSelector; use crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT; + use crate::sync::spin_lock_irq::{disable_interrupts, decrement_lock_count}; use core::sync::atomic::Ordering; if $interrupt_context { let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst); + disable_interrupts(); } if let PrivilegeLevel::Ring0 = SegmentSelector(userspace_context.cs as u16).rpl() { @@ -592,6 +594,7 @@ macro_rules! generate_trap_gate_handler { if $interrupt_context { let _ = INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst); + unsafe { decrement_lock_count(); } } // if we're returning to userspace, check we haven't been killed diff --git a/kernel/src/process.rs b/kernel/src/process.rs index 8a71ba482..9b59d0e10 100644 --- a/kernel/src/process.rs +++ b/kernel/src/process.rs @@ -177,18 +177,7 @@ pub struct ThreadStruct { /// Thread state event /// /// This is used when signaling that this thread as exited. - state_event: ThreadStateEvent, - - /// Interrupt disable counter. - /// - /// # Description - /// - /// Allows recursively disabling interrupts while keeping a sane behavior. - /// Should only be manipulated through sync::enable_interrupts and - /// sync::disable_interrupts. - /// - /// Used by the SpinLockIRQ to implement recursive irqsave logic. 
- pub int_disable_counter: AtomicUsize, + state_event: ThreadStateEvent } @@ -805,7 +794,6 @@ impl ThreadStruct { state, kstack, hwcontext : empty_hwcontext, - int_disable_counter: AtomicUsize::new(0), process: Arc::clone(belonging_process), tls_region: tls, tls_elf: SpinLock::new(VirtualAddress(0x00000000)), @@ -904,7 +892,6 @@ impl ThreadStruct { state, kstack, hwcontext, - int_disable_counter: AtomicUsize::new(0), process: Arc::clone(&process), tls_region: tls, tls_elf: SpinLock::new(VirtualAddress(0x00000000)), diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs index f6ed4be38..ce3e3a3c0 100644 --- a/kernel/src/sync/spin_lock_irq.rs +++ b/kernel/src/sync/spin_lock_irq.rs @@ -9,42 +9,49 @@ use spin::{Mutex as SpinLock, MutexGuard as SpinLockGuard}; use core::fmt; use core::mem::ManuallyDrop; use core::ops::{Deref, DerefMut}; -use core::sync::atomic::Ordering; +use core::sync::atomic::{AtomicU8, Ordering}; use super::INTERRUPT_DISARM; -use crate::scheduler; +/// Interrupt disable counter. +/// +/// # Description +/// +/// Allows recursively disabling interrupts while keeping a sane behavior. +/// Should only be manipulated through sync::enable_interrupts and +/// sync::disable_interrupts. +/// +/// Used by the SpinLockIRQ to implement recursive irqsave logic. +#[thread_local] +static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0); /// Decrement the interrupt disable counter. /// /// Look at documentation for ProcessStruct::pint_disable_counter to know more. -fn enable_interrupts() { - use crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT; +pub fn enable_interrupts() { if !INTERRUPT_DISARM.load(Ordering::SeqCst) { - if let Some(thread) = scheduler::try_get_current_thread() { - if thread.int_disable_counter.fetch_sub(1, Ordering::SeqCst) == 1 { - if INSIDE_INTERRUPT_COUNT.load(Ordering::SeqCst) == 0 { - unsafe { interrupts::sti() } - } - } - } else { - // TODO: Safety??? - // don't do anything. + if INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 { + unsafe { interrupts::sti() } } } } +/// Decrement the interrupt disable counter without possibly re-enabling interrupts. +/// +/// Used to decrement counter while keeping interrupts disabled inside an interrupt. +/// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more. +pub unsafe fn decrement_lock_count() { + if !INTERRUPT_DISARM.load(Ordering::SeqCst) { + let _ = INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst); + } +} + /// Increment the interrupt disable counter. /// /// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more. -fn disable_interrupts() { +pub fn disable_interrupts() { if !INTERRUPT_DISARM.load(Ordering::SeqCst) { - if let Some(thread) = scheduler::try_get_current_thread() { - if thread.int_disable_counter.fetch_add(1, Ordering::SeqCst) == 0 { - unsafe { interrupts::cli() } - } - } else { - // TODO: Safety??? - // don't do anything. + if INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 { + unsafe { interrupts::cli() } } } } From 692738dbab3f990931394e34c97baae908e9d8b9 Mon Sep 17 00:00:00 2001 From: Kitlith Date: Mon, 28 Oct 2019 21:01:08 -0700 Subject: [PATCH 04/13] Fix initial boot. It still deadlocks later on... 
:/
---
 kernel/src/i386/interrupt_service_routines.rs | 2 +-
 kernel/src/main.rs                            | 3 +++
 kernel/src/sync/spin_lock_irq.rs              | 7 ++++---
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/kernel/src/i386/interrupt_service_routines.rs b/kernel/src/i386/interrupt_service_routines.rs
index c00342069..e62ea9d79 100644
--- a/kernel/src/i386/interrupt_service_routines.rs
+++ b/kernel/src/i386/interrupt_service_routines.rs
@@ -1137,5 +1137,5 @@ pub unsafe fn init() {
         (*idt).load();
     }

-    sti();
+    crate::sync::spin_lock_irq::enable_interrupts();
 }
diff --git a/kernel/src/main.rs b/kernel/src/main.rs
index acf67bd9a..670ccc26b 100644
--- a/kernel/src/main.rs
+++ b/kernel/src/main.rs
@@ -250,6 +250,9 @@ pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! {

     info!("Allocating cpu_locals");
     init_cpu_locals(1);
+    // Now that cpu_locals are present, ensure that SpinLockIRQs
+    // are aware that interrupts are disabled.
+    sync::spin_lock_irq::disable_interrupts();

     info!("Enabling interrupts");
     unsafe { i386::interrupt_service_routines::init(); }
diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs
index ce3e3a3c0..891367b2d 100644
--- a/kernel/src/sync/spin_lock_irq.rs
+++ b/kernel/src/sync/spin_lock_irq.rs
@@ -11,6 +11,7 @@ use core::mem::ManuallyDrop;
 use core::ops::{Deref, DerefMut};
 use core::sync::atomic::{AtomicU8, Ordering};
 use super::INTERRUPT_DISARM;
+use crate::cpu_locals::ARE_CPU_LOCALS_INITIALIZED_YET;

 /// Interrupt disable counter.
 ///
@@ -28,7 +29,7 @@ static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0);
 ///
 /// Look at documentation for ProcessStruct::pint_disable_counter to know more.
 pub fn enable_interrupts() {
-    if !INTERRUPT_DISARM.load(Ordering::SeqCst) {
+    if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) {
         if INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 {
             unsafe { interrupts::sti() }
         }
@@ -40,7 +41,7 @@ pub fn enable_interrupts() {
 /// Used to decrement counter while keeping interrupts disabled inside an interrupt.
 /// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more.
 pub unsafe fn decrement_lock_count() {
-    if !INTERRUPT_DISARM.load(Ordering::SeqCst) {
+    if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) {
         let _ = INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst);
     }
 }
@@ -49,7 +50,7 @@ pub unsafe fn decrement_lock_count() {
 ///
 /// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more.
 pub fn disable_interrupts() {
-    if !INTERRUPT_DISARM.load(Ordering::SeqCst) {
+    if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) {
         if INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 {
             unsafe { interrupts::cli() }
         }

From f583a678958ca5af97be18ea01da90a615e66099 Mon Sep 17 00:00:00 2001
From: Kitlith
Date: Wed, 30 Oct 2019 23:21:42 -0700
Subject: [PATCH 05/13] Fix deadlock.

This was interesting to figure out. In short, when a scheduler-based
process switch occurs, a lock is held exclusively for enabling and
disabling interrupts. This lock is not dropped before the process
switch. If this is not the first time this process is run, it evens
out: the copy of this lock held by the 'new' process is dropped.
However, if it *is* the first time the process is run, there is no
lock, and the counter never gets decremented.
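To make the imbalance concrete, here is a sketch in pseudocode (hedged:
the names `interrupt_manager`, `interrupt_lock`, and
`decrement_lock_count` come from this tree, but the flow below is
illustrative, not the literal kernel code):

    // scheduler::internal_schedule(), simplified:
    let interrupt_lock = interrupt_manager.lock(); // counter 0 -> 1, cli()
    process_switch(new_thread);                    // interrupt_lock stays alive

    // A process that has run before resumes inside internal_schedule(),
    // where the guard from *its* previous call is eventually dropped:
    // counter 1 -> 0, sti(). Balanced.
    //
    // A brand-new process instead starts at jump_to_entrypoint() with no
    // guard to drop: the counter is stuck at 1, sti() never runs, and the
    // kernel deadlocks with interrupts disabled.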
This commit fixes the issue by decrementing the counter in the new
process right before the iret, which re-enables interrupts.
---
 kernel/src/i386/process_switch.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/src/i386/process_switch.rs b/kernel/src/i386/process_switch.rs
index e47c603a6..763a6a347 100644
--- a/kernel/src/i386/process_switch.rs
+++ b/kernel/src/i386/process_switch.rs
@@ -347,6 +347,8 @@ fn jump_to_entrypoint(ep: usize, userspace_stack_ptr: usize, arg1: usize, arg2:
     const_assert_eq!((GdtIndex::UTlsRegion as u16) << 3 | 0b11, 0x3B);
     const_assert_eq!((GdtIndex::UTlsElf as u16) << 3 | 0b11, 0x43);
     const_assert_eq!((GdtIndex::UStack as u16) << 3 | 0b11, 0x4B);
+    // We're about to enable interrupts by doing an iret.
+    unsafe { crate::sync::spin_lock_irq::decrement_lock_count(); }
     unsafe {
         asm!("
         mov ax,0x33 // ds, es <- UData, Ring 3

From 495cb50c18e7cfadd7f1fa46bda846ad0ba681c6 Mon Sep 17 00:00:00 2001
From: Kitlith
Date: Wed, 30 Oct 2019 23:27:52 -0700
Subject: [PATCH 06/13] Perform ALL checks in try_lock.

Forgot to copy-paste after fixing issues in lock. It doesn't seem to be
used early enough to matter, but in case it ever is...
---
 kernel/src/sync/spin_lock.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/kernel/src/sync/spin_lock.rs b/kernel/src/sync/spin_lock.rs
index 28f707975..0b5f9b4db 100644
--- a/kernel/src/sync/spin_lock.rs
+++ b/kernel/src/sync/spin_lock.rs
@@ -148,7 +148,10 @@ impl<T> SpinLock<T> {
     /// a guard within Some.
     pub fn try_lock(&self) -> Option<SpinLockGuard<'_, T>> {
         use core::sync::atomic::Ordering;
-        if crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.load(Ordering::SeqCst) != 0 {
+        use crate::cpu_locals::ARE_CPU_LOCALS_INITIALIZED_YET;
+        use crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT;
+        use super::INTERRUPT_DISARM;
+        if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INSIDE_INTERRUPT_COUNT.load(Ordering::SeqCst) != 0 {
             panic!("\
                 You have attempted to lock a spinlock in interrupt context. \
                 This is most likely a design flaw. \

From c62dc639065547441b85303d7cbc387e21dea966 Mon Sep 17 00:00:00 2001
From: Kitlith
Date: Thu, 31 Oct 2019 14:24:55 -0700
Subject: [PATCH 07/13] Add safety comments and fix clippy.

---
 kernel/src/i386/interrupt_service_routines.rs | 12 +++-
 kernel/src/i386/process_switch.rs             |  8 ++-
 kernel/src/main.rs                            |  3 +-
 kernel/src/scheduler.rs                       |  6 ++
 kernel/src/sync/spin_lock_irq.rs              | 59 ++++++++++++-------
 5 files changed, 63 insertions(+), 25 deletions(-)

diff --git a/kernel/src/i386/interrupt_service_routines.rs b/kernel/src/i386/interrupt_service_routines.rs
index e62ea9d79..a14a85c02 100644
--- a/kernel/src/i386/interrupt_service_routines.rs
+++ b/kernel/src/i386/interrupt_service_routines.rs
@@ -28,7 +28,6 @@
 //! [syscall_interrupt_dispatcher]: self::interrupt_service_routines::syscall_interrupt_dispatcher

 use crate::i386::structures::idt::{PageFaultErrorCode, Idt};
-use crate::i386::instructions::interrupts::sti;
 use crate::mem::VirtualAddress;
 use crate::paging::kernel_memory::get_kernel_memory;
 use crate::i386::PrivilegeLevel;
@@ -65,6 +64,10 @@ pub fn check_thread_killed() {
     if scheduler::get_current_thread().state.load(Ordering::SeqCst) == ThreadState::TerminationPending {
         let lock = SpinLockIRQ::new(());
         loop { // in case of spurious wakeups
+            // TODO: Is it valid for interrupts to be active here?
+ // BODY: The interrupt wrappers call decrement_lock_count before calling this function, + // BODY: because it has the possibility to never return. + // BODY: Upon a spurious wake up, interrupts will end up being enabled. let _ = scheduler::unschedule(&lock, lock.lock()); } } @@ -572,7 +575,9 @@ macro_rules! generate_trap_gate_handler { if $interrupt_context { let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst); - disable_interrupts(); + // Lets SpinLockIrq know that we are an interrupt; interrupts should not be re-enabled. + // Safety: Paired with decrement_lock_count, which is called before exiting the interrupt. + unsafe { disable_interrupts(); } } if let PrivilegeLevel::Ring0 = SegmentSelector(userspace_context.cs as u16).rpl() { @@ -594,6 +599,8 @@ macro_rules! generate_trap_gate_handler { if $interrupt_context { let _ = INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst); + // Safety: Paired with disable_interrupts, which was called earlier in this wrapper function. + // Additionally, this is called shortly before an iret occurs, inside the asm wrapper. unsafe { decrement_lock_count(); } } @@ -1137,5 +1144,6 @@ pub unsafe fn init() { (*idt).load(); } + // Paired with disable_interrupts in kernel common_start. crate::sync::spin_lock_irq::enable_interrupts(); } diff --git a/kernel/src/i386/process_switch.rs b/kernel/src/i386/process_switch.rs index 763a6a347..409b4f348 100644 --- a/kernel/src/i386/process_switch.rs +++ b/kernel/src/i386/process_switch.rs @@ -347,7 +347,13 @@ fn jump_to_entrypoint(ep: usize, userspace_stack_ptr: usize, arg1: usize, arg2: const_assert_eq!((GdtIndex::UTlsRegion as u16) << 3 | 0b11, 0x3B); const_assert_eq!((GdtIndex::UTlsElf as u16) << 3 | 0b11, 0x43); const_assert_eq!((GdtIndex::UStack as u16) << 3 | 0b11, 0x4B); - // We're about to enable interrupts by doing an iret. + + + // Safety: This is paired with an undropped SpinLockIrq (interrupt_manager) in scheduler::internal_schedule. + // (Normally, this SpinLockIrq evens out with an identical one in the same function in the new process, + // however, when a new process starts this object is not present, therefore we must manually decrement + // the counter.) + // Additionally, an iret occurs later in this function, enabling interrupts. unsafe { crate::sync::spin_lock_irq::decrement_lock_count(); } unsafe { asm!(" diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 670ccc26b..5bb3b7ed8 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -252,7 +252,8 @@ pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! { init_cpu_locals(1); // Now that cpu_locals are present, ensure that SpinLockIRQs // are aware that interrupts are disabled. - sync::spin_lock_irq::disable_interrupts(); + // Safety: paired with enable_interrupts in interrupt_service_routines::init + unsafe { sync::spin_lock_irq::disable_interrupts(); } info!("Enabling interrupts"); unsafe { i386::interrupt_service_routines::init(); } diff --git a/kernel/src/scheduler.rs b/kernel/src/scheduler.rs index b8c4c353e..ce3789e2f 100644 --- a/kernel/src/scheduler.rs +++ b/kernel/src/scheduler.rs @@ -257,6 +257,12 @@ where // TODO: Ensure the global counter is <= 1 let interrupt_manager = SpinLockIRQ::new(()); + + // This is complicated: interrupt_lock is needed to disable interrupts while + // performing a process switch: however, it is not dropped until after a process_switch + // *back* to this process. 
Usually it is paired with the SpinLockIrq being dropped in + // 'process_b', but in the case of a brand new process, it is paired with a manual decrement + // before performing an iret to userspace. let mut interrupt_lock = interrupt_manager.lock(); loop { diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs index 891367b2d..77137a050 100644 --- a/kernel/src/sync/spin_lock_irq.rs +++ b/kernel/src/sync/spin_lock_irq.rs @@ -18,8 +18,8 @@ use crate::cpu_locals::ARE_CPU_LOCALS_INITIALIZED_YET; /// # Description /// /// Allows recursively disabling interrupts while keeping a sane behavior. -/// Should only be manipulated through sync::enable_interrupts and -/// sync::disable_interrupts. +/// Should only be manipulated through [enable_interrupts], +/// [disable_interrupts], and [decrement_lock_count]. /// /// Used by the SpinLockIRQ to implement recursive irqsave logic. #[thread_local] @@ -27,19 +27,30 @@ static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0); /// Decrement the interrupt disable counter. /// -/// Look at documentation for ProcessStruct::pint_disable_counter to know more. -pub fn enable_interrupts() { - if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) { - if INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 { - unsafe { interrupts::sti() } - } +/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more. +/// +/// # Unsafety: +/// +/// Should be called in pairs with [disable_iterrupts] or [decrement_lock_count], +/// otherwise the counter will get out of sync and deadlocks will likely occur. +pub unsafe fn enable_interrupts() { + if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 { + unsafe { interrupts::sti() } } } -/// Decrement the interrupt disable counter without possibly re-enabling interrupts. +/// Decrement the interrupt disable counter without re-enabling interrupts. +/// +/// Used to decrement counter while keeping interrupts disabled before an iret. +/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more. +/// +/// # Unsafety: /// -/// Used to decrement counter while keeping interrupts disabled inside an interrupt. -/// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more. +/// Should be called in pairs with [enable_iterrupts], +/// otherwise the counter will get out of sync and deadlocks will likely occur. +/// +/// Additionally, this should only be used when interrupts are about to be enabled anyway, +/// such as by an iret to userspace. pub unsafe fn decrement_lock_count() { if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) { let _ = INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst); @@ -48,12 +59,15 @@ pub unsafe fn decrement_lock_count() { /// Increment the interrupt disable counter. /// -/// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more. -pub fn disable_interrupts() { - if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) { - if INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 { - unsafe { interrupts::cli() } - } +/// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more. +/// +/// # Unsafety: +/// +/// Should be called in pairs with [enable_iterrupts], +/// otherwise the counter will get out of sync and deadlocks will likely occur. 
+pub unsafe fn disable_interrupts() { + if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 { + unsafe { interrupts::cli() } } } @@ -82,7 +96,7 @@ pub unsafe fn permanently_disable_interrupts() { /// have been dropped. /// /// Note that it is allowed to lock/unlock the locks in a different order. It uses -/// a global counter to disable/enable interrupts. View INTERRUPT_DISABLE_COUNTER +/// a global counter to disable/enable interrupts. View [INTERRUPT_DISABLE_COUNTER] /// documentation for more information. pub struct SpinLockIRQ { /// SpinLock we wrap. @@ -106,7 +120,7 @@ impl SpinLockIRQ { impl SpinLockIRQ { /// Disables interrupts and locks the mutex. pub fn lock(&self) -> SpinLockIRQGuard<'_, T> { - // Disable irqs + // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrqGuard. unsafe { disable_interrupts(); } // TODO: Disable preemption. @@ -119,7 +133,8 @@ impl SpinLockIRQ { /// Disables interrupts and locks the mutex. pub fn try_lock(&self) -> Option> { - // Disable irqs + // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrq, + // or in case a guard is not created, later in this function. unsafe { disable_interrupts(); } // TODO: Disable preemption. @@ -130,6 +145,7 @@ impl SpinLockIRQ { Some(internalguard) => Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard))), None => { // We couldn't lock. Restore irqs and return None + // Safety: Paired with disable_interrupts above in the case that a guard is not created. unsafe { enable_interrupts(); } None } @@ -165,7 +181,8 @@ impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> { // unlock unsafe { ManuallyDrop::drop(&mut self.0); } - // Restore irq + // Safety: paired with disable_interrupts in SpinLockIRQ::{lock, try_lock}, which returns + // this guard to re-enable interrupts when it is dropped. unsafe { enable_interrupts(); } // TODO: Enable preempt From f104aec2a98212c00105a88448e4ad13b4f079bd Mon Sep 17 00:00:00 2001 From: Kitlith Date: Thu, 31 Oct 2019 15:02:42 -0700 Subject: [PATCH 08/13] Fix typo in docs. --- kernel/src/sync/spin_lock_irq.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs index 77137a050..f243df185 100644 --- a/kernel/src/sync/spin_lock_irq.rs +++ b/kernel/src/sync/spin_lock_irq.rs @@ -31,7 +31,7 @@ static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0); /// /// # Unsafety: /// -/// Should be called in pairs with [disable_iterrupts] or [decrement_lock_count], +/// Should be called in pairs with [disable_interrupts] or [decrement_lock_count], /// otherwise the counter will get out of sync and deadlocks will likely occur. pub unsafe fn enable_interrupts() { if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 { @@ -46,7 +46,7 @@ pub unsafe fn enable_interrupts() { /// /// # Unsafety: /// -/// Should be called in pairs with [enable_iterrupts], +/// Should be called in pairs with [enable_interrupts], /// otherwise the counter will get out of sync and deadlocks will likely occur. 
/// /// Additionally, this should only be used when interrupts are about to be enabled anyway, @@ -63,7 +63,7 @@ pub unsafe fn decrement_lock_count() { /// /// # Unsafety: /// -/// Should be called in pairs with [enable_iterrupts], +/// Should be called in pairs with [enable_interrupts], /// otherwise the counter will get out of sync and deadlocks will likely occur. pub unsafe fn disable_interrupts() { if !INTERRUPT_DISARM.load(Ordering::SeqCst) && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst) && INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 { From 093606395297cfed633f1259e8e777d522edfadf Mon Sep 17 00:00:00 2001 From: Kitlith Date: Fri, 1 Nov 2019 19:23:07 -0700 Subject: [PATCH 09/13] s/Unsafety/Safety/g Looks like when I went looking for examples, I found the wrong example. --- kernel/src/i386/structures/idt.rs | 2 +- kernel/src/paging/hierarchical_table.rs | 2 +- kernel/src/process/thread_local_storage.rs | 2 +- kernel/src/scheduler.rs | 4 ++-- kernel/src/sync/spin_lock_irq.rs | 6 +++--- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/src/i386/structures/idt.rs b/kernel/src/i386/structures/idt.rs index e3f839a1f..7077345bd 100644 --- a/kernel/src/i386/structures/idt.rs +++ b/kernel/src/i386/structures/idt.rs @@ -584,7 +584,7 @@ impl IdtEntry { /// Set a task gate for the IDT entry and sets the present bit. /// - /// # Unsafety + /// # Safety /// /// `tss_selector` must point to a valid TSS, which will remain present. /// The TSS' `eip` should point to the handler function. diff --git a/kernel/src/paging/hierarchical_table.rs b/kernel/src/paging/hierarchical_table.rs index 48286cbd5..82931b09e 100644 --- a/kernel/src/paging/hierarchical_table.rs +++ b/kernel/src/paging/hierarchical_table.rs @@ -648,7 +648,7 @@ pub trait InactiveHierarchyTrait : TableHierarchy { /// These frames were marked as occupied when initialising the `FrameAllocator`, /// we're making them available again. /// - /// # Unsafety + /// # Safety /// /// Having multiple InactiveHierarchy pointing to the same table hierarchy is unsafe. /// Should not be used for any other purpose, it is only guaranteed to be safe to drop. diff --git a/kernel/src/process/thread_local_storage.rs b/kernel/src/process/thread_local_storage.rs index 78b46bd50..b38760ad7 100644 --- a/kernel/src/process/thread_local_storage.rs +++ b/kernel/src/process/thread_local_storage.rs @@ -165,7 +165,7 @@ impl TLSManager { /// Mark this TLS as free, so it can be re-used by future spawned thread. /// - /// # Unsafety + /// # Safety /// /// The TLS will be reassigned, so it must never be used again after calling this function. /// diff --git a/kernel/src/scheduler.rs b/kernel/src/scheduler.rs index ce3789e2f..8998c16bb 100644 --- a/kernel/src/scheduler.rs +++ b/kernel/src/scheduler.rs @@ -71,7 +71,7 @@ pub fn get_current_process() -> Arc { /// The passed function will be executed after setting the CURRENT_THREAD, but before /// setting it back to the RUNNING state. /// -/// # Unsafety +/// # Safety /// /// Interrupts must be disabled when calling this function. It will mutably borrow [`CURRENT_THREAD`], /// so we can't have interrupts on top of that which try to access it while it is borrowed mutably by us, @@ -339,7 +339,7 @@ where /// The passed function should take care to change the protection level, and ensure it cleans up all /// the registers before calling the EIP, in order to avoid leaking information to userspace. 
/// -/// # Unsafety: +/// # Safety: /// /// Interrupts must be off when calling this function. It will set [`CURRENT_THREAD`], and then /// turn them on, as we are running a new thread, no SpinLockIRQ is held. diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs index f243df185..22d03c647 100644 --- a/kernel/src/sync/spin_lock_irq.rs +++ b/kernel/src/sync/spin_lock_irq.rs @@ -29,7 +29,7 @@ static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0); /// /// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more. /// -/// # Unsafety: +/// # Safety /// /// Should be called in pairs with [disable_interrupts] or [decrement_lock_count], /// otherwise the counter will get out of sync and deadlocks will likely occur. @@ -44,7 +44,7 @@ pub unsafe fn enable_interrupts() { /// Used to decrement counter while keeping interrupts disabled before an iret. /// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more. /// -/// # Unsafety: +/// # Safety /// /// Should be called in pairs with [enable_interrupts], /// otherwise the counter will get out of sync and deadlocks will likely occur. @@ -61,7 +61,7 @@ pub unsafe fn decrement_lock_count() { /// /// Look at documentation for [INTERRUPT_DISABLE_COUNTER] to know more. /// -/// # Unsafety: +/// # Safety /// /// Should be called in pairs with [enable_interrupts], /// otherwise the counter will get out of sync and deadlocks will likely occur. From 0d026af73d71b71302f8769ca3d99f81dd3def01 Mon Sep 17 00:00:00 2001 From: Kitlith Date: Fri, 1 Nov 2019 19:43:37 -0700 Subject: [PATCH 10/13] Move Safety comments inside the unsafe blocks. It's funny, I could've sworn there was already some tool for this, but couldn't find it when I went to look for it. *shrug* --- kernel/src/i386/interrupt_service_routines.rs | 14 ++++++---- kernel/src/i386/process_switch.rs | 15 ++++++---- kernel/src/main.rs | 6 ++-- kernel/src/sync/spin_lock_irq.rs | 28 ++++++++++++------- 4 files changed, 40 insertions(+), 23 deletions(-) diff --git a/kernel/src/i386/interrupt_service_routines.rs b/kernel/src/i386/interrupt_service_routines.rs index a14a85c02..50c20b8a2 100644 --- a/kernel/src/i386/interrupt_service_routines.rs +++ b/kernel/src/i386/interrupt_service_routines.rs @@ -576,8 +576,10 @@ macro_rules! generate_trap_gate_handler { if $interrupt_context { let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst); // Lets SpinLockIrq know that we are an interrupt; interrupts should not be re-enabled. - // Safety: Paired with decrement_lock_count, which is called before exiting the interrupt. - unsafe { disable_interrupts(); } + unsafe { + // Safety: Paired with decrement_lock_count, which is called before exiting the interrupt. + disable_interrupts(); + } } if let PrivilegeLevel::Ring0 = SegmentSelector(userspace_context.cs as u16).rpl() { @@ -599,9 +601,11 @@ macro_rules! generate_trap_gate_handler { if $interrupt_context { let _ = INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst); - // Safety: Paired with disable_interrupts, which was called earlier in this wrapper function. - // Additionally, this is called shortly before an iret occurs, inside the asm wrapper. - unsafe { decrement_lock_count(); } + unsafe { + // Safety: Paired with disable_interrupts, which was called earlier in this wrapper function. + // Additionally, this is called shortly before an iret occurs, inside the asm wrapper. 
+ decrement_lock_count(); + } } // if we're returning to userspace, check we haven't been killed diff --git a/kernel/src/i386/process_switch.rs b/kernel/src/i386/process_switch.rs index 409b4f348..c75434e28 100644 --- a/kernel/src/i386/process_switch.rs +++ b/kernel/src/i386/process_switch.rs @@ -349,12 +349,15 @@ fn jump_to_entrypoint(ep: usize, userspace_stack_ptr: usize, arg1: usize, arg2: const_assert_eq!((GdtIndex::UStack as u16) << 3 | 0b11, 0x4B); - // Safety: This is paired with an undropped SpinLockIrq (interrupt_manager) in scheduler::internal_schedule. - // (Normally, this SpinLockIrq evens out with an identical one in the same function in the new process, - // however, when a new process starts this object is not present, therefore we must manually decrement - // the counter.) - // Additionally, an iret occurs later in this function, enabling interrupts. - unsafe { crate::sync::spin_lock_irq::decrement_lock_count(); } + unsafe { + // Safety: This is paired with an undropped SpinLockIrq (interrupt_manager) in scheduler::internal_schedule. + // (Normally, this SpinLockIrq evens out with an identical one in the same function in the new process, + // however, when a new process starts this object is not present, therefore we must manually decrement + // the counter.) + // Additionally, an iret occurs later in this function, enabling interrupts. + crate::sync::spin_lock_irq::decrement_lock_count(); + } + unsafe { asm!(" mov ax,0x33 // ds, es <- UData, Ring 3 diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 5bb3b7ed8..a8c6e9369 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -252,8 +252,10 @@ pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! { init_cpu_locals(1); // Now that cpu_locals are present, ensure that SpinLockIRQs // are aware that interrupts are disabled. - // Safety: paired with enable_interrupts in interrupt_service_routines::init - unsafe { sync::spin_lock_irq::disable_interrupts(); } + unsafe { + // Safety: paired with enable_interrupts in interrupt_service_routines::init + sync::spin_lock_irq::disable_interrupts(); + } info!("Enabling interrupts"); unsafe { i386::interrupt_service_routines::init(); } diff --git a/kernel/src/sync/spin_lock_irq.rs b/kernel/src/sync/spin_lock_irq.rs index 22d03c647..2858ae031 100644 --- a/kernel/src/sync/spin_lock_irq.rs +++ b/kernel/src/sync/spin_lock_irq.rs @@ -120,8 +120,10 @@ impl SpinLockIRQ { impl SpinLockIRQ { /// Disables interrupts and locks the mutex. pub fn lock(&self) -> SpinLockIRQGuard<'_, T> { - // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrqGuard. - unsafe { disable_interrupts(); } + unsafe { + // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrqGuard. + disable_interrupts(); + } // TODO: Disable preemption. // TODO: Spin acquire @@ -133,9 +135,11 @@ impl SpinLockIRQ { /// Disables interrupts and locks the mutex. pub fn try_lock(&self) -> Option> { - // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrq, - // or in case a guard is not created, later in this function. - unsafe { disable_interrupts(); } + unsafe { + // Safety: Paired with enable_interrupts in the impl of Drop for SpinLockIrq, + // or in case a guard is not created, later in this function. + disable_interrupts(); + } // TODO: Disable preemption. // TODO: Spin acquire @@ -145,8 +149,10 @@ impl SpinLockIRQ { Some(internalguard) => Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard))), None => { // We couldn't lock. 
Restore irqs and return None - // Safety: Paired with disable_interrupts above in the case that a guard is not created. - unsafe { enable_interrupts(); } + unsafe { + // Safety: Paired with disable_interrupts above in the case that a guard is not created. + enable_interrupts(); + } None } } @@ -181,9 +187,11 @@ impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> { // unlock unsafe { ManuallyDrop::drop(&mut self.0); } - // Safety: paired with disable_interrupts in SpinLockIRQ::{lock, try_lock}, which returns - // this guard to re-enable interrupts when it is dropped. - unsafe { enable_interrupts(); } + unsafe { + // Safety: paired with disable_interrupts in SpinLockIRQ::{lock, try_lock}, which returns + // this guard to re-enable interrupts when it is dropped. + enable_interrupts(); + } // TODO: Enable preempt } From a203fb550fe1001488d923d075417ac1b99d76b1 Mon Sep 17 00:00:00 2001 From: Kitlith Date: Sun, 3 Nov 2019 15:39:28 -0800 Subject: [PATCH 11/13] INSIDE_INTERRUPT_COUNT only matters when source is kernel. --- kernel/src/i386/interrupt_service_routines.rs | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/kernel/src/i386/interrupt_service_routines.rs b/kernel/src/i386/interrupt_service_routines.rs index 50c20b8a2..ed2e18735 100644 --- a/kernel/src/i386/interrupt_service_routines.rs +++ b/kernel/src/i386/interrupt_service_routines.rs @@ -336,7 +336,7 @@ macro_rules! trap_gate_asm { /// kernel_fault_strategy: panic, // what to do if we were in kernelspace when this interruption happened. /// user_fault_strategy: panic, // what to do if we were in userspace when this interruption happened, and feature "panic-on-exception" is enabled. /// handler_strategy: kill, // what to for this interrupt otherwise -/// interrupt_context: true // OPTIONAL: basically: are IRQs disabled. True by default, false used for syscalls. +/// interrupts_disabled: true // OPTIONAL: are IRQs disabled. True by default, false used for syscalls. ///); /// ``` /// @@ -527,7 +527,7 @@ macro_rules! generate_trap_gate_handler { } }; - // handle optional argument interrupt_context. + // handle optional argument interrupts_disabled. ( name: $exception_name:literal, has_errcode: $has_errcode:ident, @@ -545,7 +545,7 @@ macro_rules! generate_trap_gate_handler { kernel_fault_strategy: $kernel_fault_strategy, user_fault_strategy: $user_fault_strategy, handler_strategy: $handler_strategy, - interrupt_context: true + interrupts_disabled: true ); }; @@ -560,7 +560,7 @@ macro_rules! generate_trap_gate_handler { kernel_fault_strategy: $kernel_fault_strategy:ident, user_fault_strategy: $user_fault_strategy:ident, handler_strategy: $handler_strategy:ident, - interrupt_context: $interrupt_context:literal + interrupts_disabled: $interrupts_disabled:literal ) => { generate_trap_gate_handler!(__gen asm_wrapper; $wrapper_asm_fnname, $wrapper_rust_fnname, $has_errcode); @@ -573,8 +573,7 @@ macro_rules! generate_trap_gate_handler { use crate::sync::spin_lock_irq::{disable_interrupts, decrement_lock_count}; use core::sync::atomic::Ordering; - if $interrupt_context { - let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst); + if $interrupts_disabled { // Lets SpinLockIrq know that we are an interrupt; interrupts should not be re-enabled. unsafe { // Safety: Paired with decrement_lock_count, which is called before exiting the interrupt. @@ -583,6 +582,7 @@ macro_rules! 
generate_trap_gate_handler { } if let PrivilegeLevel::Ring0 = SegmentSelector(userspace_context.cs as u16).rpl() { + let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst); generate_trap_gate_handler!(__gen kernel_fault; name: $exception_name, userspace_context, errcode: $has_errcode, strategy: $kernel_fault_strategy); } else { // we come from userspace, backup the hardware context in the thread struct @@ -599,8 +599,7 @@ macro_rules! generate_trap_gate_handler { // call the handler generate_trap_gate_handler!(__gen handler; name: $exception_name, userspace_context, errcode: $has_errcode, strategy: $handler_strategy); - if $interrupt_context { - let _ = INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst); + if $interrupts_disabled { unsafe { // Safety: Paired with disable_interrupts, which was called earlier in this wrapper function. // Additionally, this is called shortly before an iret occurs, inside the asm wrapper. @@ -609,8 +608,9 @@ macro_rules! generate_trap_gate_handler { } // if we're returning to userspace, check we haven't been killed - if let PrivilegeLevel::Ring3 = SegmentSelector(userspace_context.cs as u16).rpl() { - check_thread_killed(); + match SegmentSelector(userspace_context.cs as u16).rpl() { + PrivilegeLevel::Ring3 => check_thread_killed(), + PrivilegeLevel::Ring0 => INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst) } } }; @@ -841,7 +841,7 @@ generate_trap_gate_handler!(name: "Syscall Interrupt", kernel_fault_strategy: panic, // you aren't expected to syscall from the kernel user_fault_strategy: ignore, // don't worry it's fine ;) handler_strategy: syscall_interrupt_dispatcher, - interrupt_context: false + interrupts_disabled: false ); impl UserspaceHardwareContext { From d854a1f46990851c0971714218eba2b597241b67 Mon Sep 17 00:00:00 2001 From: Kitlith Date: Sat, 9 Nov 2019 00:26:49 -0800 Subject: [PATCH 12/13] Redo the macro stuff for incrementing and decrementing counters. --- kernel/src/i386/interrupt_service_routines.rs | 183 +++++++++++------- 1 file changed, 116 insertions(+), 67 deletions(-) diff --git a/kernel/src/i386/interrupt_service_routines.rs b/kernel/src/i386/interrupt_service_routines.rs index ed2e18735..8f51d727f 100644 --- a/kernel/src/i386/interrupt_service_routines.rs +++ b/kernel/src/i386/interrupt_service_routines.rs @@ -336,7 +336,7 @@ macro_rules! trap_gate_asm { /// kernel_fault_strategy: panic, // what to do if we were in kernelspace when this interruption happened. /// user_fault_strategy: panic, // what to do if we were in userspace when this interruption happened, and feature "panic-on-exception" is enabled. /// handler_strategy: kill, // what to for this interrupt otherwise -/// interrupts_disabled: true // OPTIONAL: are IRQs disabled. True by default, false used for syscalls. +/// type: exception // what "type" of interrupt this is ///); /// ``` /// @@ -349,6 +349,10 @@ macro_rules! trap_gate_asm { /// * `ignore`: don't do anything for this interrupt. /// * `kill`: kills the process in which this interrupt originated. /// * `my_handler_func`: calls `my_handler_func` to handle this interrupt. Useful if you want to override a standard strategy. +/// * The possible values for `type` are: +/// * `syscall`: interrupts and usage of regular SpinLocks are allowed. +/// * `exception`: interrupts are disabled, but regular SpinLocks can be accessed if coming from userspace. +/// * `interrupt`: interrupts and usage of regular SpinLocks are disabled. 
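+///
+/// In terms of the counters, the three `type`s differ as follows (a summary
+/// of the `__gen increment`/`__gen decrement` arms below):
+///
+/// | `type`      | `INTERRUPT_DISABLE_COUNTER` | `INSIDE_INTERRUPT_COUNT`          |
+/// |-------------|-----------------------------|-----------------------------------|
+/// | `syscall`   | untouched                   | untouched                         |
+/// | `exception` | incremented on entry        | incremented only from Ring0       |
+/// | `interrupt` | incremented on entry        | always incremented                |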
/// /// When providing a custom function as strategy, the function must be of signature: /// @@ -374,6 +378,12 @@ macro_rules! trap_gate_asm { /// /// extern "C" fn $wrapper_rust_fnname(userspace_context: &mut UserspaceHardwareContext) { /// +/// disable_interrupts() // +/// // type +/// if coming from Ring == 0 { // (here: exception) +/// disable_spinlocks() // +/// } // +/// /// if coming from Ring == 0 { /// /// kernel_panic(&PanicOrigin::KernelFault { // @@ -410,6 +420,12 @@ macro_rules! trap_gate_asm { /// ProcessStruct::kill_current_process(); // /// } /// +/// enable_interrupts() // +/// // type +/// if coming from Ring == 0 { // (here: exception) +/// enable_spinlocks() // +/// } // +/// /// // if we're returning to userspace, check we haven't been killed /// if comming from Ring == 3 { /// check_thread_killed(); @@ -527,26 +543,53 @@ macro_rules! generate_trap_gate_handler { } }; - // handle optional argument interrupts_disabled. - ( - name: $exception_name:literal, - has_errcode: $has_errcode:ident, - wrapper_asm_fnname: $wrapper_asm_fnname:ident, - wrapper_rust_fnname: $wrapper_rust_fnname:ident, - kernel_fault_strategy: $kernel_fault_strategy:ident, - user_fault_strategy: $user_fault_strategy:ident, - handler_strategy: $handler_strategy:ident - ) => { - generate_trap_gate_handler!( - name: $exception_name, - has_errcode: $has_errcode, - wrapper_asm_fnname: $wrapper_asm_fnname, - wrapper_rust_fnname: $wrapper_rust_fnname, - kernel_fault_strategy: $kernel_fault_strategy, - user_fault_strategy: $user_fault_strategy, - handler_strategy: $handler_strategy, - interrupts_disabled: true - ); + /* Counter manipulation */ + + // syscall + (__gen increment syscall; $_privlvl:path) => { + /* ignored -- interrupts are enabled, spinlocks are enabled */ + }; + (__gen decrement syscall; $_privlvl:path) => { + /* ignored -- interrupts are enabled, spinlocks are enabled */ + }; + + // exception + (__gen increment exception; $privlvl:path) => { + unsafe { + // Safety: Paired with decrement_lock_count in (__gen decrement exception; _) + crate::sync::spin_lock_irq::disable_interrupts(); + } + if $privlvl == PrivilegeLevel::Ring0 { + crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.fetch_add(1, core::sync::atomic::Ordering::SeqCst); + } + + }; + (__gen decrement exception; $privlvl:path) => { + unsafe { + // Safety: Paired with disable_interrupts in (__gen increment exception; _) + // Additionally, this is called shortly before an iret occurs, inside the asm wrapper. + crate::sync::spin_lock_irq::decrement_lock_count(); + } + if $privlvl == PrivilegeLevel::Ring0 { + crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.fetch_sub(1, core::sync::atomic::Ordering::SeqCst); + } + }; + + // interrupt + (__gen increment interrupt; $_privlvl:path) => { + unsafe { + // Safety: Paired with decrement_lock_count in (__gen decrement interrupt; _) + crate::sync::spin_lock_irq::disable_interrupts(); + } + crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.fetch_add(1, core::sync::atomic::Ordering::SeqCst); + }; + (__gen decrement interrupt; $_privlvl:path) => { + unsafe { + // Safety: Paired with disable_interrupts in (__gen increment interrupt; _) + // Additionally, this is called shortly before an iret occurs, inside the asm wrapper. + crate::sync::spin_lock_irq::decrement_lock_count(); + } + crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT.fetch_sub(1, core::sync::atomic::Ordering::SeqCst); }; /* The full wrapper */ @@ -560,29 +603,21 @@ macro_rules! 
generate_trap_gate_handler { kernel_fault_strategy: $kernel_fault_strategy:ident, user_fault_strategy: $user_fault_strategy:ident, handler_strategy: $handler_strategy:ident, - interrupts_disabled: $interrupts_disabled:literal + type: $type:ident ) => { generate_trap_gate_handler!(__gen asm_wrapper; $wrapper_asm_fnname, $wrapper_rust_fnname, $has_errcode); /// Auto generated function. See [generate_trap_gate_handler]. extern "C" fn $wrapper_rust_fnname(userspace_context: &mut UserspaceHardwareContext) { - use crate::i386::structures::gdt::SegmentSelector; - use crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT; - use crate::sync::spin_lock_irq::{disable_interrupts, decrement_lock_count}; - use core::sync::atomic::Ordering; - - if $interrupts_disabled { - // Lets SpinLockIrq know that we are an interrupt; interrupts should not be re-enabled. - unsafe { - // Safety: Paired with decrement_lock_count, which is called before exiting the interrupt. - disable_interrupts(); - } - } - if let PrivilegeLevel::Ring0 = SegmentSelector(userspace_context.cs as u16).rpl() { - let _ = INSIDE_INTERRUPT_COUNT.fetch_add(1, Ordering::SeqCst); + let privilege_level = SegmentSelector(userspace_context.cs as u16).rpl(); + + // mark interrupts and spinlocks disabled as apporpriate + generate_trap_gate_handler!(__gen increment $type; privilege_level); + + if let PrivilegeLevel::Ring0 = privilege_level { generate_trap_gate_handler!(__gen kernel_fault; name: $exception_name, userspace_context, errcode: $has_errcode, strategy: $kernel_fault_strategy); } else { // we come from userspace, backup the hardware context in the thread struct @@ -599,18 +634,12 @@ macro_rules! generate_trap_gate_handler { // call the handler generate_trap_gate_handler!(__gen handler; name: $exception_name, userspace_context, errcode: $has_errcode, strategy: $handler_strategy); - if $interrupts_disabled { - unsafe { - // Safety: Paired with disable_interrupts, which was called earlier in this wrapper function. - // Additionally, this is called shortly before an iret occurs, inside the asm wrapper. 
- decrement_lock_count(); - } - } + // re-enable interrupts and spinlocks as appropriate + generate_trap_gate_handler!(__gen decrement $type; privilege_level); // if we're returning to userspace, check we haven't been killed - match SegmentSelector(userspace_context.cs as u16).rpl() { - PrivilegeLevel::Ring3 => check_thread_killed(), - PrivilegeLevel::Ring0 => INSIDE_INTERRUPT_COUNT.fetch_sub(1, Ordering::SeqCst) + if let PrivilegeLevel::Ring3 = SegmentSelector(userspace_context.cs as u16).rpl() { + check_thread_killed() } } }; @@ -626,7 +655,8 @@ generate_trap_gate_handler!(name: "Divide Error Exception", wrapper_rust_fnname: divide_by_zero_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Debug Exception", @@ -635,7 +665,8 @@ generate_trap_gate_handler!(name: "Debug Exception", wrapper_rust_fnname: debug_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "An unexpected non-maskable (but still kinda maskable) interrupt occurred", @@ -644,7 +675,8 @@ generate_trap_gate_handler!(name: "An unexpected non-maskable (but still kinda m wrapper_rust_fnname: nmi_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "Breakpoint Exception", @@ -653,7 +685,8 @@ generate_trap_gate_handler!(name: "Breakpoint Exception", wrapper_rust_fnname: breakpoint_exception_rust_wrapper, kernel_fault_strategy: ignore, user_fault_strategy: ignore, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "Overflow Exception", @@ -662,7 +695,8 @@ generate_trap_gate_handler!(name: "Overflow Exception", wrapper_rust_fnname: overflow_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "BOUND Range Exceeded Exception", @@ -671,7 +705,8 @@ generate_trap_gate_handler!(name: "BOUND Range Exceeded Exception", wrapper_rust_fnname: bound_range_exceeded_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Invalid opcode Exception", @@ -680,7 +715,8 @@ generate_trap_gate_handler!(name: "Invalid opcode Exception", wrapper_rust_fnname: invalid_opcode_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Device Not Available Exception", @@ -689,7 +725,8 @@ generate_trap_gate_handler!(name: "Device Not Available Exception", wrapper_rust_fnname: device_not_available_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); /// Double fault handler. Panics the kernel unconditionally. 
@@ -705,7 +742,8 @@ generate_trap_gate_handler!(name: "Invalid TSS Exception", wrapper_rust_fnname: invalid_tss_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "Segment Not Present Exception", @@ -714,7 +752,8 @@ generate_trap_gate_handler!(name: "Segment Not Present Exception", wrapper_rust_fnname: segment_not_present_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Stack Fault Exception", @@ -723,7 +762,8 @@ generate_trap_gate_handler!(name: "Stack Fault Exception", wrapper_rust_fnname: stack_fault_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "General Protection Fault Exception", @@ -732,7 +772,8 @@ generate_trap_gate_handler!(name: "General Protection Fault Exception", wrapper_rust_fnname: general_protection_fault_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Page Fault Exception", @@ -741,7 +782,8 @@ generate_trap_gate_handler!(name: "Page Fault Exception", wrapper_rust_fnname: page_fault_exception_rust_wrapper, kernel_fault_strategy: kernel_page_fault_panic, user_fault_strategy: user_page_fault_panic, - handler_strategy: user_page_fault_handler + handler_strategy: user_page_fault_handler, + type: exception ); /// Overriding the default panic strategy so we can display cr2 @@ -786,7 +828,8 @@ generate_trap_gate_handler!(name: "x87 FPU floating-point error", wrapper_rust_fnname: x87_floating_point_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Alignment Check Exception", @@ -795,7 +838,8 @@ generate_trap_gate_handler!(name: "Alignment Check Exception", wrapper_rust_fnname: alignment_check_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Machine-Check Exception", @@ -804,7 +848,8 @@ generate_trap_gate_handler!(name: "Machine-Check Exception", wrapper_rust_fnname: machinee_check_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "SIMD Floating-Point Exception", @@ -813,7 +858,8 @@ generate_trap_gate_handler!(name: "SIMD Floating-Point Exception", wrapper_rust_fnname: simd_floating_point_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Virtualization Exception", @@ -822,7 +868,8 @@ generate_trap_gate_handler!(name: "Virtualization Exception", wrapper_rust_fnname: virtualization_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: kill + handler_strategy: kill, + type: exception ); generate_trap_gate_handler!(name: "Security Exception", @@ -831,7 +878,8 @@ generate_trap_gate_handler!(name: "Security Exception", wrapper_rust_fnname: 
security_exception_rust_wrapper, kernel_fault_strategy: panic, user_fault_strategy: panic, - handler_strategy: panic + handler_strategy: panic, + type: exception ); generate_trap_gate_handler!(name: "Syscall Interrupt", @@ -841,7 +889,7 @@ generate_trap_gate_handler!(name: "Syscall Interrupt", kernel_fault_strategy: panic, // you aren't expected to syscall from the kernel user_fault_strategy: ignore, // don't worry it's fine ;) handler_strategy: syscall_interrupt_dispatcher, - interrupts_disabled: false + type: syscall ); impl UserspaceHardwareContext { @@ -1055,7 +1103,8 @@ macro_rules! irq_handler { wrapper_rust_fnname: $rust_wrapper_name, kernel_fault_strategy: ignore, // irqs can happen while we're in kernel mode, don't worry, it's fine ;) user_fault_strategy: ignore, // don't worry it's fine ;) - handler_strategy: $handler_name + handler_strategy: $handler_name, + type: interrupt ); )* From 9ebe582df5b9309924d9b60b1af656f945e74eb2 Mon Sep 17 00:00:00 2001 From: Kitlith Date: Sat, 9 Nov 2019 12:07:12 -0800 Subject: [PATCH 13/13] Fix documentation nits Co-Authored-By: Robin Lambertz --- kernel/src/i386/interrupt_service_routines.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/src/i386/interrupt_service_routines.rs b/kernel/src/i386/interrupt_service_routines.rs index 8f51d727f..32e68c5bd 100644 --- a/kernel/src/i386/interrupt_service_routines.rs +++ b/kernel/src/i386/interrupt_service_routines.rs @@ -350,8 +350,8 @@ macro_rules! trap_gate_asm { /// * `kill`: kills the process in which this interrupt originated. /// * `my_handler_func`: calls `my_handler_func` to handle this interrupt. Useful if you want to override a standard strategy. /// * The possible values for `type` are: -/// * `syscall`: interrupts and usage of regular SpinLocks are allowed. -/// * `exception`: interrupts are disabled, but regular SpinLocks can be accessed if coming from userspace. +/// * `syscall`: interrupts are enabled, and usage of regular SpinLocks are allowed. +/// * `exception`: interrupts are disabled, but regular SpinLocks can be accessed if the exception comes from userspace. /// * `interrupt`: interrupts and usage of regular SpinLocks are disabled. /// /// When providing a custom function as strategy, the function must be of signature:
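A minimal, freestanding model of the counter scheme this series converges
on (a sketch under stated assumptions, not the kernel code: sti()/cli()
are replaced by an AtomicBool standing in for the IF flag, the
#[thread_local] INTERRUPT_DISABLE_COUNTER is modeled as a plain static
since the model is single-CPU, and the INTERRUPT_DISARM /
ARE_CPU_LOCALS_INITIALIZED_YET checks are omitted):

    use std::sync::atomic::{AtomicBool, AtomicU8, Ordering};

    /// Stand-in for the CPU IF flag (sti() sets it, cli() clears it).
    static INTERRUPTS_ENABLED: AtomicBool = AtomicBool::new(false);
    /// Model of the #[thread_local] INTERRUPT_DISABLE_COUNTER.
    static INTERRUPT_DISABLE_COUNTER: AtomicU8 = AtomicU8::new(0);

    /// SpinLockIRQ::lock() side: "cli" only on the 0 -> 1 transition.
    fn disable_interrupts() {
        if INTERRUPT_DISABLE_COUNTER.fetch_add(1, Ordering::SeqCst) == 0 {
            INTERRUPTS_ENABLED.store(false, Ordering::SeqCst); // cli()
        }
    }

    /// Guard-drop side: "sti" only on the 1 -> 0 transition.
    fn enable_interrupts() {
        if INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst) == 1 {
            INTERRUPTS_ENABLED.store(true, Ordering::SeqCst); // sti()
        }
    }

    /// Pre-iret side: decrement without touching the (modeled) IF flag.
    fn decrement_lock_count() {
        let _ = INTERRUPT_DISABLE_COUNTER.fetch_sub(1, Ordering::SeqCst);
    }

    fn main() {
        // Two nested "SpinLockIRQ guards", dropped in any order: only the
        // outermost transitions touch the IF flag.
        disable_interrupts(); // guard A taken:   0 -> 1, cli
        disable_interrupts(); // guard B taken:   1 -> 2
        enable_interrupts();  // guard A dropped: 2 -> 1, IF untouched
        assert!(!INTERRUPTS_ENABLED.load(Ordering::SeqCst));
        enable_interrupts();  // guard B dropped: 1 -> 0, sti
        assert!(INTERRUPTS_ENABLED.load(Ordering::SeqCst));

        // New-process path from patch 05: the scheduler's guard is never
        // dropped, so the counter is balanced manually just before the
        // iret (which is what re-enables interrupts on real hardware).
        disable_interrupts();   // interrupt_manager.lock() in internal_schedule
        decrement_lock_count(); // jump_to_entrypoint(), right before iret
        assert_eq!(INTERRUPT_DISABLE_COUNTER.load(Ordering::SeqCst), 0);
    }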