From dc4632421f0d7ed278be941d55ad522200ded039 Mon Sep 17 00:00:00 2001 From: roblabla Date: Thu, 21 Feb 2019 17:45:44 +0000 Subject: [PATCH 01/15] Go back to backup/restore SpinLockIRQ system --- kernel/src/process.rs | 13 ---- kernel/src/sync.rs | 141 +++++++++++++++++++++--------------------- 2 files changed, 69 insertions(+), 85 deletions(-) diff --git a/kernel/src/process.rs b/kernel/src/process.rs index 97a320f14..c5cf4e641 100644 --- a/kernel/src/process.rs +++ b/kernel/src/process.rs @@ -106,17 +106,6 @@ pub struct ThreadStruct { /// The currently running process is indirectly kept alive by the `CURRENT_THREAD` global in scheduler. pub process: Arc, - /// Interrupt disable counter. - /// - /// # Description - /// - /// Allows recursively disabling interrupts while keeping a sane behavior. - /// Should only be manipulated through sync::enable_interrupts and - /// sync::disable_interrupts. - /// - /// Used by the SpinLockIRQ to implement recursive irqsave logic. - pub int_disable_counter: AtomicUsize, - /// Argument passed to the entrypoint on first schedule. pub arg: usize } @@ -534,7 +523,6 @@ impl ThreadStruct { state, kstack, hwcontext : empty_hwcontext, - int_disable_counter: AtomicUsize::new(0), process: Arc::clone(belonging_process), arg } @@ -607,7 +595,6 @@ impl ThreadStruct { state, kstack, hwcontext, - int_disable_counter: AtomicUsize::new(0), process: Arc::clone(&process), arg: 0 } diff --git a/kernel/src/sync.rs b/kernel/src/sync.rs index 5564f5c67..fe93f1b73 100644 --- a/kernel/src/sync.rs +++ b/kernel/src/sync.rs @@ -8,51 +8,18 @@ use core::fmt; use core::mem::ManuallyDrop; use core::ops::{Deref, DerefMut}; pub use self::spin::{Mutex as SpinLock, MutexGuard as SpinLockGuard}; -use crate::i386::instructions::interrupts::*; +use crate::i386::instructions::interrupts; use core::sync::atomic::{AtomicBool, Ordering}; -use crate::scheduler; /// Placeholder for future Mutex implementation. pub type Mutex = SpinLock; /// Placeholder for future Mutex implementation. pub type MutexGuard<'a, T> = SpinLockGuard<'a, T>; -/// Decrement the interrupt disable counter. -/// -/// Look at documentation for ProcessStruct::pint_disable_counter to know more. -fn enable_interrupts() { - if !INTERRUPT_DISARM.load(Ordering::SeqCst) { - if let Some(thread) = scheduler::try_get_current_thread() { - if thread.int_disable_counter.fetch_sub(1, Ordering::SeqCst) == 1 { - unsafe { sti() } - } - } else { - // TODO: Safety??? - // don't do anything. - } - } -} - -/// Increment the interrupt disable counter. -/// -/// Look at documentation for INTERRUPT_DISABLE_COUNTER to know more. -fn disable_interrupts() { - if !INTERRUPT_DISARM.load(Ordering::SeqCst) { - if let Some(thread) = scheduler::try_get_current_thread() { - if thread.int_disable_counter.fetch_add(1, Ordering::SeqCst) == 0 { - unsafe { cli() } - } - } else { - // TODO: Safety??? - // don't do anything. - } - } -} - /// Boolean to [permanently_disable_interrupts]. /// -/// If this bool is set, all [enable_interrupts] and [disable_interrupts] calls are ignored, -/// and system is put in an unrecoverable state. +/// If this bool is set, all attempts to enable interrupts through a SpinLockIRQ +/// are ignored, leaving the system in an unrecoverable state. /// /// This is used by kernel panic handlers. static INTERRUPT_DISARM: AtomicBool = AtomicBool::new(false); @@ -64,7 +31,7 @@ static INTERRUPT_DISARM: AtomicBool = AtomicBool::new(false); /// Simply sets [INTERRUPT_DISARM]. 
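+///
+/// Once this has been called, [SpinLockIRQ] stops saving and restoring the
+/// interrupt flag altogether (see the INTERRUPT_DISARM checks in lock and
+/// try_lock below), so the cli() performed here is never undone.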
 pub unsafe fn permanently_disable_interrupts() {
     INTERRUPT_DISARM.store(true, Ordering::SeqCst);
-    unsafe { cli() }
+    unsafe { interrupts::cli() }
 }
 
 /// SpinLock that disables IRQ.
@@ -77,12 +44,27 @@ pub unsafe fn permanently_disable_interrupts() {
 /// - `lock` behaves like a `spinlock_irqsave`. It returns a guard.
 /// - Dropping the guard behaves like `spinlock_irqrestore`.
 ///
-/// This means that locking a spinlock disables interrupts until all spinlocks
-/// have been dropped.
+/// This means that locking a spinlock disables interrupts until all spinlock
+/// guards have been dropped.
 ///
-/// Note that it is allowed to lock/unlock the locks in a different order. It uses
-/// a global counter to disable/enable interrupts. View INTERRUPT_DISABLE_COUNTER
-/// documentation for more information.
+/// A note on reordering: reordering lock drops is prohibited and doing so will
+/// result in UB.
+//
+// TODO: Find sane design for SpinLockIRQ safety
+// BODY: Currently, SpinLockIRQ API is unsound. If the guards are dropped in
+// BODY: the wrong order, it may cause IF to be reset too early.
+// BODY:
+// BODY: Ideally, we would need a way to prevent the guard variable to be
+// BODY: reassigned. AKA: prevent moving. Note that this is different from what
+// BODY: the Pin API solves. The Pin API is about locking a variable in one
+// BODY: memory location, but its binding may still be moved and dropped.
+// BODY: Unfortunately, Rust does not have a way to express that a value cannot
+// BODY: be reassigned.
+// BODY:
+// BODY: Another possibility would be to switch to a callback API. This would
+// BODY: solve the problem, but the scheduler would be unable to consume such
+// BODY: locks. Maybe we could have an unsafe "scheduler_relock" function that
+// BODY: may only be called from the scheduler?
 pub struct SpinLockIRQ<T: ?Sized> {
     /// SpinLock we wrap.
     internal: SpinLock<T>
 }
 
@@ -104,32 +86,45 @@ impl<T> SpinLockIRQ<T> {
 impl<T: ?Sized> SpinLockIRQ<T> {
     /// Disables interrupts and locks the mutex.
-    pub fn lock(&self) -> SpinLockIRQGuard<'_, T> {
-        // Disable irqs
-        unsafe { disable_interrupts(); }
+    pub fn lock(&self) -> SpinLockIRQGuard<'_, T> {
+        if INTERRUPT_DISARM.load(Ordering::SeqCst) {
+            let internalguard = self.internal.lock();
+            SpinLockIRQGuard(ManuallyDrop::new(internalguard), false)
+        } else {
+            // Save current interrupt state.
+            let saved_intpt_flag = interrupts::are_enabled();
 
-        // TODO: Disable preemption.
-        // TODO: Spin acquire
+            // Disable interruptions
+            unsafe { interrupts::cli(); }
 
-        // lock
-        let internalguard = self.internal.lock();
-        SpinLockIRQGuard(ManuallyDrop::new(internalguard))
+            let internalguard = self.internal.lock();
+            SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag)
+        }
     }
 
     /// Disables interrupts and locks the mutex.
-    pub fn try_lock(&self) -> Option<SpinLockIRQGuard<'_, T>> {
-        // Disable irqs
-        unsafe { disable_interrupts(); }
-
-        // TODO: Disable preemption.
-        // TODO: Spin acquire
-
-        // lock
-        match self.internal.try_lock() {
-            Some(internalguard) => Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard))),
-            None => {
-                // We couldn't lock. Restore irqs and return None
-                unsafe { enable_interrupts(); }
+    pub fn try_lock(&self) -> Option<SpinLockIRQGuard<'_, T>> {
+        if INTERRUPT_DISARM.load(Ordering::SeqCst) {
+            self.internal.try_lock()
+                .map(|v| SpinLockIRQGuard(ManuallyDrop::new(v), false))
+        } else {
+            // Save current interrupt state.
+            let saved_intpt_flag = interrupts::are_enabled();
+
+            // Disable interruptions
+            unsafe { interrupts::cli(); }
+
+            // Lock spinlock
+            let internalguard = self.internal.try_lock();
+
+            if let Some(internalguard) = internalguard {
+                // if lock is successful, return guard.
+                Some(SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag))
+            } else {
+                // Else, restore interrupt state
+                if saved_intpt_flag {
+                    unsafe { interrupts::sti(); }
+                }
                 None
             }
         }
     }
@@ -143,20 +138,20 @@ impl<T: ?Sized> SpinLockIRQ<T> {
 
 impl<T: fmt::Debug> fmt::Debug for SpinLockIRQ<T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self.try_lock() {
-            Some(d) => {
-                write!(f, "SpinLockIRQ {{ data: ")?;
-                d.fmt(f)?;
-                write!(f, "}}")
-            },
-            None => write!(f, "SpinLockIRQ {{ }}")
+        if let Some(v) = self.try_lock() {
+            f.debug_struct("SpinLockIRQ")
+                .field("data", &v)
+                .finish()
+        } else {
+            write!(f, "SpinLockIRQ {{ }}")
         }
     }
 }
 
+
 /// The SpinLockIrq lock guard.
 #[derive(Debug)]
-pub struct SpinLockIRQGuard<'a, T: ?Sized>(ManuallyDrop<SpinLockGuard<'a, T>>);
+pub struct SpinLockIRQGuard<'a, T: ?Sized>(ManuallyDrop<SpinLockGuard<'a, T>>, bool);
 
 impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> {
     fn drop(&mut self) {
@@ -165,7 +160,9 @@ impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> {
         unsafe { ManuallyDrop::drop(&mut self.0); }
 
         // Restore irq
-        unsafe { enable_interrupts(); }
+        if self.1 {
+            unsafe { interrupts::sti(); }
+        }
 
         // TODO: Enable preempt
     }

From 56960dd48a44a41ea781b2fff0beaaa058e73eb31 Mon Sep 17 00:00:00 2001
From: roblabla
Date: Mon, 28 Jan 2019 17:44:37 +0000
Subject: [PATCH 02/15] Makefile: support multiple targets, use profiles for
 debug/release

- Instead of duplicating rules for debug and release, we use profiles to
  determine whether we should build in Debug or Release mode. We do this
  by adding two global env variables (TARGET_PATH and RELEASE_FLAG).
  TARGET_PATH should contain the path relative to target/ where we can
  find the compiled binaries. RELEASE_FLAG should contain --release in
  release mode, and a no-op flag in debug mode. We use
  --message-format=human as that no-op flag.
- We also support building for different targets using the TARGET
  environment variable, which defaults to i386-unknown-none.
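To illustrate (not part of the diffs below): with the default debug
profile, the kernel build rule now expands to roughly

    cargo xbuild --target=i386-unknown-none --package=kfs-kernel --message-format=human

while `cargo make iso --profile production` expands the same rule to

    cargo xbuild --target=i386-unknown-none --package=kfs-kernel --release

Since human is already cargo's default message format, passing
--message-format=human explicitly changes nothing about the build, which
is what makes it usable as a no-op placeholder.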
--- .travis.yml | 2 +- Makefile.toml | 123 ++++++++++++-------------------------------------- 2 files changed, 31 insertions(+), 94 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7088f2a0e..47c96e702 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,7 +17,7 @@ before_install: script: - cargo make iso -- cargo make iso-release +- cargo make iso --profile production - cargo make test - cargo make doc-full - cargo make clippy-ci diff --git a/Makefile.toml b/Makefile.toml index 9b2be2407..d25e25449 100644 --- a/Makefile.toml +++ b/Makefile.toml @@ -5,6 +5,14 @@ skip_core_tasks = true RUST_TARGET_PATH = "${PWD}" GDB_PORT = { script = ["echo ${GDB_PORT:-9090}"] } VNC_PORT = { script = ["echo ${VNC_PORT:-:0}"] } +TARGET = { script = ["echo ${TARGET:-i386-unknown-none}"] } +TARGET_PATH = "${TARGET}/debug" +# TODO: Find better work around cargo build not accepting empty arguments +RELEASE_FLAG = "--message-format=human" + +[env.production] +TARGET_PATH = "${TARGET}/release" +RELEASE_FLAG = "--release" [tasks.bootstrap-linker] workspace = false @@ -26,142 +34,71 @@ install_crate = { crate_name = "mkisofs-rs", binary = "mkisofs-rs", test_arg = " [tasks.bootstrap] workspace = false -description = "Compiles the i386 bootstrap for debug" -dependencies = ["bootstrap-linker", "install-rust-src"] -command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-bootstrap" ] - -[tasks.bootstrap-release] -workspace = false -description = "Compiles the i386 bootstrap for release" +description = "Compiles the i386 bootstrap" dependencies = ["bootstrap-linker", "install-rust-src"] command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-bootstrap", "--release" ] +args = ["xbuild", "--target=${TARGET}", "--package=kfs-bootstrap", "${RELEASE_FLAG}"] [tasks.kernel] workspace = false -description = "Compiles the kernel for debug" -dependencies = ["kernel-linker", "install-rust-src"] -command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-kernel", "-Z", "package-features", "--features=panic-on-exception"] - -[tasks.kernel-release] -workspace = false -description = "Compiles the kernel for release" +description = "Compiles the kernel" dependencies = ["kernel-linker", "install-rust-src"] command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-kernel", "--release" ] +args = ["xbuild", "--target=${TARGET}", "--package=kfs-kernel", "-Z", "package-features", "--features=panic-on-exception", "${RELEASE_FLAG}"] [tasks.vi] workspace = false description = "Compiles kfs-vi" dependencies = ["userspace-linker", "install-rust-src"] command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-vi"] - -[tasks.vi-release] -workspace = false -description = "Compiles kfs-vi" -dependencies = ["userspace-linker", "install-rust-src"] -command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-vi", "--release"] +args = ["xbuild", "--target=${TARGET}", "--package=kfs-vi", "${RELEASE_FLAG}"] [tasks.sm] workspace = false description = "Compiles kfs-sm" dependencies = ["userspace-linker", "install-rust-src"] command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-sm"] - -[tasks.sm-release] -workspace = false -description = "Compiles kfs-sm" -dependencies = ["userspace-linker", "install-rust-src"] -command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-sm", "--release"] +args = ["xbuild", "--target=${TARGET}", "--package=kfs-sm", 
"${RELEASE_FLAG}"] [tasks.shell] workspace = false description = "Compiles kfs-shell" dependencies = ["userspace-linker", "install-rust-src"] command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-shell"] - -[tasks.shell-release] -workspace = false -description = "Compiles kfs-shell" -dependencies = ["userspace-linker", "install-rust-src"] -command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-shell", "--release"] +args = ["xbuild", "--target=${TARGET}", "--package=kfs-shell", "${RELEASE_FLAG}"] [tasks.clock] workspace = false description = "Compiles kfs-clock" dependencies = ["userspace-linker", "install-rust-src"] command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-clock"] - -[tasks.clock-release] -workspace = false -description = "Compiles kfs-clock" -dependencies = ["userspace-linker", "install-rust-src"] -command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-clock", "--release"] +args = ["xbuild", "--target=${TARGET}", "--package=kfs-clock", "${RELEASE_FLAG}"] [tasks.ahci] workspace = false description = "Compiles kfs-ahci" dependencies = ["userspace-linker", "install-rust-src"] command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-ahci"] - -[tasks.ahci-release] -workspace = false -description = "Compiles kfs-ahci" -dependencies = ["userspace-linker", "install-rust-src"] -command = "cargo" -args = ["xbuild", "--target=i386-unknown-none", "--package=kfs-ahci", "--release"] +args = ["xbuild", "--target=${TARGET}", "--package=kfs-ahci", "${RELEASE_FLAG}"] [tasks.userspace] workspace = false description = "Compiles userspace apps" dependencies = ["shell", "clock", "sm", "vi", "ahci"] -[tasks.userspace-release] -workspace = false -description = "Compiles userspace apps" -dependencies = ["shell-release", "clock-release", "sm-release", "vi-release", "ahci-release"] - [tasks.iso] workspace = false description = "Creates a bootable ISO containing the kernel and grub." dependencies = ["bootstrap", "kernel", "userspace", "install-mkisofs-rs"] script = [ ''' -cp target/i386-unknown-none/debug/kfs-bootstrap isofiles/boot/ -cp target/i386-unknown-none/debug/kfs-kernel isofiles/boot/ -cp target/i386-unknown-none/debug/kfs-shell isofiles/boot/ -cp target/i386-unknown-none/debug/kfs-clock isofiles/boot/ -cp target/i386-unknown-none/debug/kfs-sm isofiles/boot/ -cp target/i386-unknown-none/debug/kfs-vi isofiles/boot/ -cp target/i386-unknown-none/debug/kfs-ahci isofiles/boot/ -mkisofs-rs external/grub/isofiles isofiles -o os.iso -b boot/grub/i386-pc/eltorito.img --no-emul-boot --boot-info-table --embedded-boot external/grub/embedded.img -''' -] - -[tasks.iso-release] -workspace = false -description = "Creates a bootable ISO containing the kernel and grub." 
-dependencies = ["bootstrap-release", "kernel-release", "userspace-release", "install-mkisofs-rs"] -script = [ -''' -cp target/i386-unknown-none/release/kfs-bootstrap isofiles/boot/ -cp target/i386-unknown-none/release/kfs-kernel isofiles/boot/ -cp target/i386-unknown-none/release/kfs-shell isofiles/boot/ -cp target/i386-unknown-none/release/kfs-clock isofiles/boot/ -cp target/i386-unknown-none/release/kfs-sm isofiles/boot/ -cp target/i386-unknown-none/release/kfs-vi isofiles/boot/ -cp target/i386-unknown-none/release/kfs-ahci isofiles/boot/ +cp target/${TARGET_PATH}/kfs-bootstrap isofiles/boot/ +cp target/${TARGET_PATH}/kfs-kernel isofiles/boot/ +cp target/${TARGET_PATH}/kfs-shell isofiles/boot/ +cp target/${TARGET_PATH}/kfs-clock isofiles/boot/ +cp target/${TARGET_PATH}/kfs-sm isofiles/boot/ +cp target/${TARGET_PATH}/kfs-vi isofiles/boot/ +cp target/${TARGET_PATH}/kfs-ahci isofiles/boot/ mkisofs-rs external/grub/isofiles isofiles -o os.iso -b boot/grub/i386-pc/eltorito.img --no-emul-boot --boot-info-table --embedded-boot external/grub/embedded.img ''' ] @@ -169,7 +106,7 @@ mkisofs-rs external/grub/isofiles isofiles -o os.iso -b boot/grub/i386-pc/eltori [tasks.qemu] workspace = false description = "Runs the bootable ISO in qemu." -dependencies = ["iso-release"] +dependencies = ["iso"] command = "qemu-system-i386" args = ["-cdrom", "os.iso", "-serial", "stdio", "-vnc", "${VNC_PORT}", "-no-reboot", "-enable-kvm"] @@ -185,14 +122,14 @@ workspace = false description = "Generate the project's documentation" env = { "RUSTFLAGS" = "--sysroot=${PWD}/target/sysroot", "RUSTDOCFLAGS" = "--sysroot=${PWD}/target/sysroot"} command = "cargo" -args = ["doc", "--target=i386-unknown-none", "--no-deps" ] +args = ["doc", "--target=${TARGET}", "--no-deps" ] [tasks.doc-full] workspace = false description = "Generate the project's documentation, including private items" env = { "RUSTFLAGS" = "--sysroot=${PWD}/target/sysroot", "RUSTDOCFLAGS" = "--sysroot=${PWD}/target/sysroot -Z unstable-options --enable-index-page"} command = "cargo" -args = ["doc", "--target=i386-unknown-none", "--no-deps", "--document-private-items"] +args = ["doc", "--target=${TARGET}", "--no-deps", "--document-private-items"] [tasks.deploy-doc] workspace = false @@ -200,7 +137,7 @@ install_crate = { crate_name = "cargo-travis", binary = "cargo", test_arg = "doc install_crate_args = ["--git", "https://github.com/roblabla/cargo-travis", "--rev", "doc-upload-target"] description = "Upload this project's documentation on github pages. Should only run on CI." 
command = "cargo" -args = ["doc-upload", "--target=i386-unknown-none", "--clobber-index"] +args = ["doc-upload", "--target=${TARGET}", "--clobber-index"] [tasks.test] workspace = false @@ -214,7 +151,7 @@ dependencies = ["install-rust-src"] install_crate = { rustup_component_name = "clippy" } description = "Run clippy" command = "cargo" -args = ["xclippy", "--target=i386-unknown-none", "--", +args = ["xclippy", "--target=${TARGET}", "--", "-A", "clippy::redundant_field_names", "-A", "clippy::unreadable_literal", "-A", "clippy::identity_op", @@ -238,7 +175,7 @@ dependencies = ["install-rust-src"] install_crate = { rustup_component_name = "clippy" } description = "Run clippy" command = "cargo" -args = ["xclippy", "--target=i386-unknown-none", "--", +args = ["xclippy", "--target=${TARGET}", "--", "-A", "clippy::redundant_field_names", "-A", "clippy::unreadable_literal", "-A", "clippy::identity_op", From bc9cba47d94d59aec8b3b122a1a0e37fa67464be Mon Sep 17 00:00:00 2001 From: roblabla Date: Tue, 29 Jan 2019 16:58:40 +0000 Subject: [PATCH 03/15] Kernel: move i386 module to arch::i386 Start refactoring the i386 module into an architecture abstraction module. The arch module should export functions abstracting common operations across multiple architectures, and expose a module containing the architecture-specific functionality. For now, the architecture abstraction API contains two functions: - enable_interrupts, which allow hardware to call event::dispatch_event - disable_interrupts, which tells hardware to queue interruptions. --- kernel/src/{ => arch}/i386/gdt.rs | 10 +-- kernel/src/{ => arch}/i386/mod.rs | 23 ++++- kernel/src/{ => arch}/i386/multiboot.rs | 0 kernel/src/{ => arch}/i386/process_switch.rs | 4 +- kernel/src/{ => arch}/i386/registers.rs | 0 kernel/src/{ => arch}/i386/stack.rs | 0 kernel/src/{ => arch}/i386/structures/gdt.rs | 2 +- kernel/src/{ => arch}/i386/structures/idt.rs | 12 +-- kernel/src/{ => arch}/i386/structures/mod.rs | 0 kernel/src/arch/mod.rs | 15 ++++ kernel/src/devices/pic.rs | 2 +- kernel/src/devices/pit.rs | 2 +- kernel/src/devices/rs232.rs | 2 +- kernel/src/i386/pio.rs | 95 -------------------- kernel/src/interrupts/irq.rs | 2 +- kernel/src/interrupts/mod.rs | 8 +- kernel/src/interrupts/syscalls.rs | 2 +- kernel/src/log_impl/mod.rs | 2 +- kernel/src/main.rs | 14 ++- kernel/src/process.rs | 2 +- kernel/src/scheduler.rs | 6 +- kernel/src/sync.rs | 21 ++--- 22 files changed, 77 insertions(+), 147 deletions(-) rename kernel/src/{ => arch}/i386/gdt.rs (98%) rename kernel/src/{ => arch}/i386/mod.rs (93%) rename kernel/src/{ => arch}/i386/multiboot.rs (100%) rename kernel/src/{ => arch}/i386/process_switch.rs (99%) rename kernel/src/{ => arch}/i386/registers.rs (100%) rename kernel/src/{ => arch}/i386/stack.rs (100%) rename kernel/src/{ => arch}/i386/structures/gdt.rs (97%) rename kernel/src/{ => arch}/i386/structures/idt.rs (98%) rename kernel/src/{ => arch}/i386/structures/mod.rs (100%) create mode 100644 kernel/src/arch/mod.rs delete mode 100644 kernel/src/i386/pio.rs diff --git a/kernel/src/i386/gdt.rs b/kernel/src/arch/i386/gdt.rs similarity index 98% rename from kernel/src/i386/gdt.rs rename to kernel/src/arch/i386/gdt.rs index 32fe7c305..caf476a82 100644 --- a/kernel/src/i386/gdt.rs +++ b/kernel/src/arch/i386/gdt.rs @@ -12,10 +12,10 @@ use core::ops::{Deref, DerefMut}; use core::slice; use core::fmt; -use crate::i386::{PrivilegeLevel, TssStruct}; -use crate::i386::structures::gdt::SegmentSelector; -use crate::i386::instructions::tables::{lgdt, sgdt, 
DescriptorTablePointer}; -use crate::i386::instructions::segmentation::*; +use crate::arch::i386::{PrivilegeLevel, TssStruct}; +use crate::arch::i386::structures::gdt::SegmentSelector; +use crate::arch::i386::instructions::tables::{lgdt, sgdt, DescriptorTablePointer}; +use crate::arch::i386::instructions::segmentation::*; use crate::paging::PAGE_SIZE; use crate::paging::{MappingAccessRights, kernel_memory::get_kernel_memory}; @@ -39,7 +39,7 @@ static GLOBAL_LDT: Once = Once::new(); /// This function should only be called once. Further calls will be silently /// ignored. pub fn init_gdt() { - use crate::i386::instructions::tables::{lldt, ltr}; + use crate::arch::i386::instructions::tables::{lldt, ltr}; let ldt = GLOBAL_LDT.call_once(DescriptorTable::new); diff --git a/kernel/src/i386/mod.rs b/kernel/src/arch/i386/mod.rs similarity index 93% rename from kernel/src/i386/mod.rs rename to kernel/src/arch/i386/mod.rs index aa072304c..b97725fff 100644 --- a/kernel/src/i386/mod.rs +++ b/kernel/src/arch/i386/mod.rs @@ -28,7 +28,7 @@ pub mod instructions { pub mod tables { //! Instructions for loading descriptor tables (GDT, IDT, etc.). - use crate::i386::structures::gdt::SegmentSelector; + use crate::arch::i386::structures::gdt::SegmentSelector; /// A struct describing a pointer to a descriptor table (GDT / IDT). /// This is in a format suitable for giving to 'lgdt' or 'lidt'. @@ -81,7 +81,7 @@ pub mod instructions { pub mod segmentation { //! Provides functions to read and write segment registers. - use crate::i386::structures::gdt::SegmentSelector; + use crate::arch::i386::structures::gdt::SegmentSelector; /// Reload code segment register. /// Note this is special since we can not directly move @@ -147,7 +147,7 @@ pub mod instructions { /// Returns whether interrupts are enabled. pub fn are_enabled() -> bool { - use crate::i386::registers::eflags::{self, EFlags}; + use crate::arch::i386::registers::eflags::{self, EFlags}; eflags::read().contains(EFlags::INTERRUPT_FLAG) } @@ -442,3 +442,20 @@ impl DerefMut for AlignedTssStruct { &mut self.0 } } + +// START ARCH API HERE +/// Enable interruptions. After calling this function, hardware should call +/// [crate::event::dispatch_event] whenever it receives an interruption. +pub unsafe fn enable_interrupts() { + instructions::interrupts::sti(); +} + +/// Disable interruptions, returning true if they were previously enabled, or +/// false if they were already disabled. After calling this function, no hardware +/// should call [crate::event::dispatch_event]. Interruptions should be queued +/// until either [enable_interrupts] is called or a process switch is performed. +pub unsafe fn disable_interrupts() -> bool { + let backup = instructions::interrupts::are_enabled(); + instructions::interrupts::cli(); + backup +} diff --git a/kernel/src/i386/multiboot.rs b/kernel/src/arch/i386/multiboot.rs similarity index 100% rename from kernel/src/i386/multiboot.rs rename to kernel/src/arch/i386/multiboot.rs diff --git a/kernel/src/i386/process_switch.rs b/kernel/src/arch/i386/process_switch.rs similarity index 99% rename from kernel/src/i386/process_switch.rs rename to kernel/src/arch/i386/process_switch.rs index 067db278f..f8ef21cd1 100644 --- a/kernel/src/i386/process_switch.rs +++ b/kernel/src/arch/i386/process_switch.rs @@ -3,10 +3,10 @@ //! 
This modules describe low-level functions and structures needed to perform a process switch use crate::process::ThreadStruct; -use crate::i386::gdt; +use crate::arch::i386::gdt; use alloc::sync::Arc; use core::mem::size_of; -use crate::i386::TssStruct; +use crate::arch::i386::TssStruct; /// The hardware context of a paused thread. It contains just enough registers to get the thread /// running again. diff --git a/kernel/src/i386/registers.rs b/kernel/src/arch/i386/registers.rs similarity index 100% rename from kernel/src/i386/registers.rs rename to kernel/src/arch/i386/registers.rs diff --git a/kernel/src/i386/stack.rs b/kernel/src/arch/i386/stack.rs similarity index 100% rename from kernel/src/i386/stack.rs rename to kernel/src/arch/i386/stack.rs diff --git a/kernel/src/i386/structures/gdt.rs b/kernel/src/arch/i386/structures/gdt.rs similarity index 97% rename from kernel/src/i386/structures/gdt.rs rename to kernel/src/arch/i386/structures/gdt.rs index e5e823be4..baa6d6997 100644 --- a/kernel/src/i386/structures/gdt.rs +++ b/kernel/src/arch/i386/structures/gdt.rs @@ -1,7 +1,7 @@ //! Types for the Global Descriptor Table and segment selectors. use core::fmt; -use crate::i386::PrivilegeLevel; +use crate::arch::i386::PrivilegeLevel; use bit_field::BitField; /// Specifies which element to load into a segment from diff --git a/kernel/src/i386/structures/idt.rs b/kernel/src/arch/i386/structures/idt.rs similarity index 98% rename from kernel/src/i386/structures/idt.rs rename to kernel/src/arch/i386/structures/idt.rs index 567dc5678..2438b11bc 100644 --- a/kernel/src/i386/structures/idt.rs +++ b/kernel/src/arch/i386/structures/idt.rs @@ -14,12 +14,12 @@ use core::marker::PhantomData; use core::mem; use core::ops::{Index, IndexMut}; use bit_field::BitField; -use crate::i386::{AlignedTssStruct, TssStruct, PrivilegeLevel}; +use crate::arch::i386::{AlignedTssStruct, TssStruct, PrivilegeLevel}; use crate::mem::VirtualAddress; use crate::paging::{PAGE_SIZE, kernel_memory::get_kernel_memory}; use alloc::boxed::Box; -use crate::i386::gdt; -use crate::i386::structures::gdt::SegmentSelector; +use crate::arch::i386::gdt; +use crate::arch::i386::structures::gdt::SegmentSelector; /// An Interrupt Descriptor Table with 256 entries. /// @@ -427,7 +427,7 @@ impl Idt { /// Loads the IDT in the CPU using the `lidt` command. pub fn load(&'static self) { - use crate::i386::instructions::tables::{lidt, DescriptorTablePointer}; + use crate::arch::i386::instructions::tables::{lidt, DescriptorTablePointer}; use core::mem::size_of; let ptr = DescriptorTablePointer { @@ -576,7 +576,7 @@ impl IdtEntry { /// The function returns a mutable reference to the entry's options that allows /// further customization. pub unsafe fn set_interrupt_gate_addr(&mut self, addr: u32) -> &mut EntryOptions { - use crate::i386::instructions::segmentation; + use crate::arch::i386::instructions::segmentation; self.pointer_low = addr as u16; self.pointer_high = (addr >> 16) as u16; @@ -652,7 +652,7 @@ impl_set_handler_fn!(PageFaultHandlerFunc); /// /// Technically, this represents a subset of [SystemDescriptorTypes]. 
/// -/// [SystemDescriptorTypes]: crate::i386::gdt::SystemDescriptorTypes +/// [SystemDescriptorTypes]: crate::arch::i386::gdt::SystemDescriptorTypes #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[repr(u8)] #[allow(clippy::missing_docs_in_private_items)] diff --git a/kernel/src/i386/structures/mod.rs b/kernel/src/arch/i386/structures/mod.rs similarity index 100% rename from kernel/src/i386/structures/mod.rs rename to kernel/src/arch/i386/structures/mod.rs diff --git a/kernel/src/arch/mod.rs b/kernel/src/arch/mod.rs new file mode 100644 index 000000000..4ec29345e --- /dev/null +++ b/kernel/src/arch/mod.rs @@ -0,0 +1,15 @@ +//! Arch-specific API +//! +//! This module contains the architecture abstraction API, and a module with the +//! arch-specific APIs. For instance, the `i386` module contains i386-specific +//! APIs and is only present when building the kernel for the i386 architecture. +//! As such, it is required to gate access to those APIs behind a `cfg`, to avoid +//! breaking builds on other architectures. + +#[cfg(target_arch = "x86")] +pub mod i386; +#[cfg(target_arch = "x86")] +use self::i386 as arch; + +// Reexport public API +pub use self::arch::{enable_interrupts, disable_interrupts}; diff --git a/kernel/src/devices/pic.rs b/kernel/src/devices/pic.rs index bd207c88c..4dbc2d0de 100644 --- a/kernel/src/devices/pic.rs +++ b/kernel/src/devices/pic.rs @@ -3,7 +3,7 @@ //! Only handles the usual case of two PICs in a cascading setup, where the //! SLAVE is setup to cascade to the line 2 of the MASTER. -use crate::i386::pio::Pio; +use crate::arch::i386::pio::Pio; use crate::io::Io; use crate::sync::{Once, SpinLockIRQ}; diff --git a/kernel/src/devices/pit.rs b/kernel/src/devices/pit.rs index e26656d1c..e4bbca3c1 100644 --- a/kernel/src/devices/pit.rs +++ b/kernel/src/devices/pit.rs @@ -54,7 +54,7 @@ use crate::sync::SpinLock; use crate::io::Io; -use crate::i386::pio::Pio; +use crate::arch::i386::pio::Pio; use crate::event::{self, IRQEvent, Waitable}; use crate::utils::div_ceil; diff --git a/kernel/src/devices/rs232.rs b/kernel/src/devices/rs232.rs index 95a6984a9..9e03e5091 100644 --- a/kernel/src/devices/rs232.rs +++ b/kernel/src/devices/rs232.rs @@ -3,7 +3,7 @@ use core::fmt::{Display, Write, Error, Formatter}; use crate::sync::{Once, SpinLock}; use crate::io::Io; -use crate::i386::pio::Pio; +use crate::arch::i386::pio::Pio; /// The base IO port of a COM #[derive(Debug, Copy, Clone)] diff --git a/kernel/src/i386/pio.rs b/kernel/src/i386/pio.rs deleted file mode 100644 index daeedb56d..000000000 --- a/kernel/src/i386/pio.rs +++ /dev/null @@ -1,95 +0,0 @@ -//! Port IO -//! -//! Copied from [redox pio](https://gitlab.redox-os.org/redox-os/syscall/blob/master/src/io/pio.rs) - -use core::marker::PhantomData; - -use ::io::Io; - -/// Generic PIO -#[derive(Copy, Clone, Debug)] -pub struct Pio { - /// The io port address. - port: u16, - /// The width of the port. 
- value: PhantomData, -} - -impl Pio { - /// Create a PIO from a given port - pub const fn new(port: u16) -> Self { - Pio:: { - port: port, - value: PhantomData, - } - } -} - -/// Read/Write for byte PIO -impl Io for Pio { - type Value = u8; - - /// Read - #[inline(always)] - fn read(&self) -> u8 { - let value: u8; - unsafe { - asm!("in $0, $1" : "={al}"(value) : "{dx}"(self.port) : "memory" : "intel", "volatile"); - } - value - } - - /// Write - #[inline(always)] - fn write(&mut self, value: u8) { - unsafe { - asm!("out $1, $0" : : "{al}"(value), "{dx}"(self.port) : "memory" : "intel", "volatile"); - } - } -} - -/// Read/Write for word PIO -impl Io for Pio { - type Value = u16; - - /// Read - #[inline(always)] - fn read(&self) -> u16 { - let value: u16; - unsafe { - asm!("in $0, $1" : "={ax}"(value) : "{dx}"(self.port) : "memory" : "intel", "volatile"); - } - value - } - - /// Write - #[inline(always)] - fn write(&mut self, value: u16) { - unsafe { - asm!("out $1, $0" : : "{ax}"(value), "{dx}"(self.port) : "memory" : "intel", "volatile"); - } - } -} - -/// Read/Write for doubleword PIO -impl Io for Pio { - type Value = u32; - - /// Read - #[inline(always)] - fn read(&self) -> u32 { - let value: u32; - unsafe { - asm!("in $0, $1" : "={eax}"(value) : "{dx}"(self.port) : "memory" : "intel", "volatile"); - } - value - } - - /// Write - #[inline(always)] - fn write(&mut self, value: u32) { - unsafe { - asm!("out $1, $0" : : "{eax}"(value), "{dx}"(self.port) : "memory" : "intel", "volatile"); - } - } -} \ No newline at end of file diff --git a/kernel/src/interrupts/irq.rs b/kernel/src/interrupts/irq.rs index 24fa71e22..866476299 100644 --- a/kernel/src/interrupts/irq.rs +++ b/kernel/src/interrupts/irq.rs @@ -7,7 +7,7 @@ //! defined in the event module. It is expected that these pointer will then be //! inserted in an architecture-specific interrupt table (such as i386's IDT). -use crate::i386::structures::idt::ExceptionStackFrame; +use crate::arch::i386::structures::idt::ExceptionStackFrame; use crate::devices::pic; #[allow(clippy::missing_docs_in_private_items)] diff --git a/kernel/src/interrupts/mod.rs b/kernel/src/interrupts/mod.rs index 670d509b1..a34e75723 100644 --- a/kernel/src/interrupts/mod.rs +++ b/kernel/src/interrupts/mod.rs @@ -5,12 +5,12 @@ //! Feature `panic-on-exception` makes the kernel stop and panic when a thread generates //! an exception. This is useful for debugging. -use crate::i386::structures::idt::{ExceptionStackFrame, PageFaultErrorCode, Idt}; -use crate::i386::instructions::interrupts::sti; +use crate::arch::i386::structures::idt::{ExceptionStackFrame, PageFaultErrorCode, Idt}; +use crate::arch::i386::instructions::interrupts::sti; use crate::mem::VirtualAddress; use crate::paging::kernel_memory::get_kernel_memory; -use crate::i386::{TssStruct, PrivilegeLevel}; -use crate::i386::gdt; +use crate::arch::i386::{TssStruct, PrivilegeLevel}; +use crate::arch::i386::gdt; use crate::scheduler::get_current_thread; use crate::process::{ProcessStruct, ThreadState}; use crate::sync::SpinLockIRQ; diff --git a/kernel/src/interrupts/syscalls.rs b/kernel/src/interrupts/syscalls.rs index 4a09fb99b..9fdd5da20 100644 --- a/kernel/src/interrupts/syscalls.rs +++ b/kernel/src/interrupts/syscalls.rs @@ -1,6 +1,6 @@ //! 
Syscall implementations -use crate::i386; +use crate::arch::i386; use crate::mem::{VirtualAddress, PhysicalAddress}; use crate::mem::{UserSpacePtr, UserSpacePtrMut}; use crate::paging::{MappingAccessRights, mapping::MappingType}; diff --git a/kernel/src/log_impl/mod.rs b/kernel/src/log_impl/mod.rs index b839b638d..d1dedad30 100644 --- a/kernel/src/log_impl/mod.rs +++ b/kernel/src/log_impl/mod.rs @@ -5,7 +5,7 @@ mod filter; use log::{self, Log, Metadata, Record, LevelFilter}; use crate::devices::rs232::SerialLogger; use core::fmt::Write; -use crate::i386::multiboot::get_boot_information; +use crate::arch::i386::multiboot::get_boot_information; use crate::sync::{RwLock, Once}; use crate::scheduler; diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 1164e6f93..d974a5bbf 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -49,13 +49,11 @@ use core::fmt::Write; use alloc::prelude::*; use crate::utils::io; +pub mod arch; pub mod paging; pub mod event; pub mod error; pub mod log_impl; -#[cfg(any(target_arch = "x86", test))] -#[macro_use] -pub mod i386; pub mod interrupts; pub mod frame_allocator; @@ -82,7 +80,7 @@ pub use crate::heap_allocator::rust_oom; #[global_allocator] static ALLOCATOR: heap_allocator::Allocator = heap_allocator::Allocator::new(); -use crate::i386::stack; +use crate::arch::i386::stack; use crate::paging::{PAGE_SIZE, MappingAccessRights}; use crate::mem::VirtualAddress; use crate::process::{ProcessStruct, ThreadStruct}; @@ -128,7 +126,7 @@ unsafe fn force_double_fault() { /// From now on, the kernel's only job will be to respond to IRQs and serve syscalls. fn main() { info!("Loading all the init processes"); - for module in i386::multiboot::get_boot_information().module_tags().skip(1) { + for module in crate::arch::i386::multiboot::get_boot_information().module_tags().skip(1) { info!("Loading {}", module.name()); let mapped_module = elf_loader::map_grub_module(module) .unwrap_or_else(|_| panic!("Unable to find available memory for module {}", module.name())); @@ -213,10 +211,10 @@ pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! { // Set up (read: inhibit) the GDT. info!("Initializing gdt..."); - i386::gdt::init_gdt(); + crate::arch::i386::gdt::init_gdt(); info!("Gdt initialized"); - i386::multiboot::init(boot_info); + crate::arch::i386::multiboot::init(boot_info); log_impl::init(); @@ -291,7 +289,7 @@ unsafe fn do_panic(msg: core::fmt::Arguments<'_>, stackdump_source: Option(current_thread: Arc, unsafe { // this is a new process, no SpinLockIRQ is held - crate::i386::instructions::interrupts::sti(); + crate::arch::i386::instructions::interrupts::sti(); } jump_to_entrypoint() diff --git a/kernel/src/sync.rs b/kernel/src/sync.rs index fe93f1b73..ad6244ae8 100644 --- a/kernel/src/sync.rs +++ b/kernel/src/sync.rs @@ -8,7 +8,7 @@ use core::fmt; use core::mem::ManuallyDrop; use core::ops::{Deref, DerefMut}; pub use self::spin::{Mutex as SpinLock, MutexGuard as SpinLockGuard}; -use crate::i386::instructions::interrupts; +use crate::arch::{enable_interrupts as arch_enable_interrupts, disable_interrupts as arch_disable_interrupts}; use core::sync::atomic::{AtomicBool, Ordering}; /// Placeholder for future Mutex implementation. @@ -31,7 +31,7 @@ static INTERRUPT_DISARM: AtomicBool = AtomicBool::new(false); /// Simply sets [INTERRUPT_DISARM]. pub unsafe fn permanently_disable_interrupts() { INTERRUPT_DISARM.store(true, Ordering::SeqCst); - unsafe { interrupts::cli() } + unsafe { arch_disable_interrupts(); } } /// SpinLock that disables IRQ. 
@@ -91,11 +91,9 @@ impl SpinLockIRQ { let internalguard = self.internal.lock(); SpinLockIRQGuard(ManuallyDrop::new(internalguard), false) } else { - // Save current interrupt state. - let saved_intpt_flag = interrupts::are_enabled(); + // Save current interrupt state and disable interruptions. + let saved_intpt_flag = unsafe { arch_disable_interrupts() }; - // Disable interruptions - unsafe { interrupts::cli(); } let internalguard = self.internal.lock(); SpinLockIRQGuard(ManuallyDrop::new(internalguard), saved_intpt_flag) @@ -108,11 +106,8 @@ impl SpinLockIRQ { self.internal.try_lock() .map(|v| SpinLockIRQGuard(ManuallyDrop::new(v), false)) } else { - // Save current interrupt state. - let saved_intpt_flag = interrupts::are_enabled(); - - // Disable interruptions - unsafe { interrupts::cli(); } + // Save current interrupt state and disable interruptions. + let saved_intpt_flag = unsafe { arch_disable_interrupts() }; // Lock spinlock let internalguard = self.internal.try_lock(); @@ -123,7 +118,7 @@ impl SpinLockIRQ { } else { // Else, restore interrupt state if saved_intpt_flag { - unsafe { interrupts::sti(); } + unsafe { arch_enable_interrupts() }; } None } @@ -161,7 +156,7 @@ impl<'a, T: ?Sized + 'a> Drop for SpinLockIRQGuard<'a, T> { // Restore irq if self.1 { - unsafe { interrupts::sti(); } + unsafe { arch_enable_interrupts(); } } // TODO: Enable preempt From f5be039639235a82ff42a0a4e343b7da399db45f Mon Sep 17 00:00:00 2001 From: roblabla Date: Wed, 30 Jan 2019 01:42:48 +0000 Subject: [PATCH 04/15] Add stub arch for documentation and tests. The stub arch is a noop architecture where all the functions do nothing. It's mostly meant to be used as a centralized place for the documentation of the arch abstraction API, but can also be used for tests and easily bootstrapping a port to a new architecture. --- Cargo.lock | 1 + kernel/Cargo.toml | 1 + kernel/src/arch/i386/mod.rs | 10 +++------- kernel/src/arch/mod.rs | 17 +++++++++++++---- kernel/src/arch/stub/mod.rs | 19 +++++++++++++++++++ 5 files changed, 37 insertions(+), 11 deletions(-) create mode 100644 kernel/src/arch/stub/mod.rs diff --git a/Cargo.lock b/Cargo.lock index d71c580c6..153248a1c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -135,6 +135,7 @@ dependencies = [ "bitfield 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "hashbrown 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "kfs-libkern 0.1.0", diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index f6d90a0c7..436deb4b0 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -32,6 +32,7 @@ rustc-demangle = "0.1" failure = { version = "0.1", default-features = false, features = ["derive"] } bitfield = "0.13" mashup = "0.1.9" +cfg-if = "0.1" [dependencies.smallvec] default-features = false diff --git a/kernel/src/arch/i386/mod.rs b/kernel/src/arch/i386/mod.rs index b97725fff..7ce98a7e1 100644 --- a/kernel/src/arch/i386/mod.rs +++ b/kernel/src/arch/i386/mod.rs @@ -444,17 +444,13 @@ impl DerefMut for AlignedTssStruct { } // START ARCH API HERE -/// Enable interruptions. After calling this function, hardware should call -/// [crate::event::dispatch_event] whenever it receives an interruption. 
+/// See [arch::stub::enable_interrupts]
 pub unsafe fn enable_interrupts() {
     instructions::interrupts::sti();
 }
 
-/// Disable interruptions, returning true if they were previously enabled, or
-/// false if they were already disabled. After calling this function, no hardware
-/// should call [crate::event::dispatch_event]. Interruptions should be queued
-/// until either [enable_interrupts] is called or a process switch is performed.
-pub unsafe fn disable_interrupts() -> bool {
+/// See [arch::stub::disable_interrupts]
+pub unsafe fn disable_interrupts() {
     let backup = instructions::interrupts::are_enabled();
     instructions::interrupts::cli();
     backup
 }
diff --git a/kernel/src/arch/mod.rs b/kernel/src/arch/mod.rs
index 4ec29345e..26e6ec118 100644
--- a/kernel/src/arch/mod.rs
+++ b/kernel/src/arch/mod.rs
@@ -6,10 +6,19 @@
 //! As such, it is required to gate access to those APIs behind a `cfg`, to avoid
 //! breaking builds on other architectures.
 
-#[cfg(target_arch = "x86")]
-pub mod i386;
-#[cfg(target_arch = "x86")]
-use self::i386 as arch;
+use cfg_if::cfg_if;
+
+// Unconditionally include stub for documentation purposes.
+pub mod stub;
+
+cfg_if! {
+    if #[cfg(target_arch = "x86")] {
+        pub mod i386;
+        use self::i386 as arch;
+    } else {
+        use self::stub as arch;
+    }
+}
 
 // Reexport public API
 pub use self::arch::{enable_interrupts, disable_interrupts};
diff --git a/kernel/src/arch/stub/mod.rs b/kernel/src/arch/stub/mod.rs
new file mode 100644
index 000000000..a7a8ccae4
--- /dev/null
+++ b/kernel/src/arch/stub/mod.rs
@@ -0,0 +1,19 @@
+//! Stub implementation of the arch-independent API
+//!
+//! This is the stub implementation of the arch-independent API. Its aim is to
+//! ease porting efforts by providing a copy-pastable module to start a new
+//! implementation of the arch-specific component, and to provide the test builds
+//! with a simple implementation.
+
+/// Enable interrupts. After calling this function, hardware should call
+/// [crate::event::dispatch_event] whenever it receives an interrupt.
+pub unsafe fn enable_interrupts() {
+}
+
+/// Disable interrupts, returning true if they were previously enabled, or
+/// false if they were already disabled. After calling this function, no hardware
+/// should call [crate::event::dispatch_event]. Interrupts should be queued
+/// until either [enable_interrupts] is called or a process switch is performed.
+pub unsafe fn disable_interrupts() -> bool {
+    false
+}

From 37d1cc94e9a73731aab8faeebac4bb027994b236 Mon Sep 17 00:00:00 2001
From: roblabla
Date: Wed, 30 Jan 2019 02:19:13 +0000
Subject: [PATCH 05/15] Kernel: arch abstraction API for cmdline and logs

Adds two new functions to the arch abstraction API: get_cmdline and
get_logger. Those are used by the log implementation to find the serial
device to log to, and to read the log configuration.
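As a sketch of what a port needs to provide (illustrative only; the
names mirror the stub, and the Uart type is hypothetical):

    /// Hypothetical port: no bootloader cmdline available, so hand back
    /// the same default filter as the stub.
    pub fn get_cmdline() -> &'static str {
        "debug"
    }

    /// Hypothetical port: any type implementing core::fmt::Write works,
    /// typically a serial/UART driver.
    pub fn get_logger() -> impl core::fmt::Write {
        struct Uart; // placeholder for a real MMIO serial driver
        impl core::fmt::Write for Uart {
            fn write_str(&mut self, s: &str) -> core::fmt::Result {
                for _byte in s.bytes() {
                    // a real driver would write each byte to the UART
                    // data register here
                }
                Ok(())
            }
        }
        Uart
    }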
---
 kernel/src/arch/i386/mod.rs       | 17 +++++++++++++++++
 kernel/src/arch/mod.rs            |  4 +++-
 kernel/src/arch/stub/mod.rs       | 28 ++++++++++++++++++++++++++++
 kernel/src/log_impl/filter/mod.rs | 10 +++++-----
 kernel/src/log_impl/mod.rs        | 11 +++++------
 5 files changed, 58 insertions(+), 12 deletions(-)

diff --git a/kernel/src/arch/i386/mod.rs b/kernel/src/arch/i386/mod.rs
index 7ce98a7e1..2bdffbc4f 100644
--- a/kernel/src/arch/i386/mod.rs
+++ b/kernel/src/arch/i386/mod.rs
@@ -455,3 +455,20 @@ pub unsafe fn disable_interrupts() {
     instructions::interrupts::cli();
     backup
 }
+
+/// See [arch::stub::get_cmdline]
+pub fn get_cmdline() -> &'static str {
+    if let Some(cmdlinetag) = multiboot::get_boot_information().command_line_tag() {
+        cmdlinetag.command_line()
+    } else {
+        "debug"
+    }
+}
+
+/// See [arch::stub::get_logger]
+///
+/// On i386, we return the RS232 SerialLogger.
+pub fn get_logger() -> impl core::fmt::Write {
+    use crate::devices::rs232::SerialLogger;
+    SerialLogger
+}
diff --git a/kernel/src/arch/mod.rs b/kernel/src/arch/mod.rs
index 26e6ec118..dbfb6405e 100644
--- a/kernel/src/arch/mod.rs
+++ b/kernel/src/arch/mod.rs
@@ -21,4 +21,6 @@ cfg_if! {
 }
 
 // Reexport public API
-pub use self::arch::{enable_interrupts, disable_interrupts};
+pub use self::arch::{
+    enable_interrupts, disable_interrupts, get_cmdline, get_logger
+};
diff --git a/kernel/src/arch/stub/mod.rs b/kernel/src/arch/stub/mod.rs
index a7a8ccae4..3ce6a6c82 100644
--- a/kernel/src/arch/stub/mod.rs
+++ b/kernel/src/arch/stub/mod.rs
@@ -17,3 +17,31 @@ pub unsafe fn enable_interrupts() {
 pub unsafe fn disable_interrupts() -> bool {
     false
 }
+
+/// Get the kernel arguments. Based on those, various kernel features may get
+/// enabled/disabled.
+///
+/// In practice, this cmdline is mainly used to set up the logger implementation.
+/// It follows a similar scheme to env_logger, but doesn't implement the regex
+/// matching. Look at the [env_logger docs] for more information.
+///
+/// [env_logger docs]: https://docs.rs/env_logger/0.6.0/env_logger/
+pub fn get_cmdline() -> &'static str {
+    "debug"
+}
+
+/// Get the kernel logger sink. Usually, this will be the Serial/UART output.
+/// All calls to `log!` and co. will be directed to this logger. Note that this
+/// function is called very early in the boot process (it's called in
+/// [log_impl::log]).
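+///
+/// Because it can run before the heap and most kernel services are
+/// initialized, implementations should avoid allocating here.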
+pub fn get_logger() -> impl core::fmt::Write { + #[doc(hidden)] + #[derive(Debug)] + struct EmptyLogger; + impl core::fmt::Write for EmptyLogger { + fn write_str(&mut self, _s: &str) -> Result<(), core::fmt::Error> { + Ok(()) + } + } + EmptyLogger +} diff --git a/kernel/src/log_impl/filter/mod.rs b/kernel/src/log_impl/filter/mod.rs index 907924c1c..a25815a12 100644 --- a/kernel/src/log_impl/filter/mod.rs +++ b/kernel/src/log_impl/filter/mod.rs @@ -68,7 +68,7 @@ use core::mem; use core::fmt::{self, Write}; use alloc::prelude::*; use log::{Level, LevelFilter, Record, Metadata}; -use crate::devices::rs232::SerialLogger; +use crate::arch::get_logger; use smallvec::SmallVec; #[path = "string.rs"] @@ -301,7 +301,7 @@ fn parse_spec(spec: &str) -> (Vec, Option) { let mods = parts.next(); let filter = parts.next(); if parts.next().is_some() { - writeln!(SerialLogger, "warning: invalid logging spec '{}', \ + writeln!(get_logger(), "warning: invalid logging spec '{}', \ ignoring it (too many '/'s)", spec); return (dirs, None); } @@ -322,14 +322,14 @@ fn parse_spec(spec: &str) -> (Vec, Option) { match part1.parse() { Ok(num) => (num, Some(part0)), _ => { - writeln!(SerialLogger, "warning: invalid logging spec '{}', \ + writeln!(get_logger(), "warning: invalid logging spec '{}', \ ignoring it", part1); continue } } }, _ => { - writeln!(SerialLogger, "warning: invalid logging spec '{}', \ + writeln!(get_logger(), "warning: invalid logging spec '{}', \ ignoring it", s); continue } @@ -344,7 +344,7 @@ fn parse_spec(spec: &str) -> (Vec, Option) { match inner::Filter::new(filter) { Ok(re) => Some(re), Err(e) => { - writeln!(SerialLogger, "warning: invalid regex filter - {}", e); + writeln!(get_logger(), "warning: invalid regex filter - {}", e); None } } diff --git a/kernel/src/log_impl/mod.rs b/kernel/src/log_impl/mod.rs index d1dedad30..66abc7782 100644 --- a/kernel/src/log_impl/mod.rs +++ b/kernel/src/log_impl/mod.rs @@ -3,9 +3,9 @@ mod filter; use log::{self, Log, Metadata, Record, LevelFilter}; -use crate::devices::rs232::SerialLogger; +use crate::arch::get_logger; use core::fmt::Write; -use crate::arch::i386::multiboot::get_boot_information; +use crate::arch::get_cmdline; use crate::sync::{RwLock, Once}; use crate::scheduler; @@ -22,9 +22,9 @@ impl Log for Logger { fn log(&self, record: &Record<'_>) { if self.filter.read().matches(record) { if let Some(thread) = scheduler::try_get_current_thread() { - writeln!(SerialLogger, "[{}] - {} - {} - {}", record.level(), record.target(), thread.process.name, record.args()); + writeln!(get_logger(), "[{}] - {} - {} - {}", record.level(), record.target(), thread.process.name, record.args()); } else { - writeln!(SerialLogger, "[{}] - {} - {}", record.level(), record.target(), record.args()); + writeln!(get_logger(), "[{}] - {} - {}", record.level(), record.target(), record.args()); } } } @@ -48,7 +48,6 @@ pub fn early_init() { /// Reinitializes the logger using the cmdline. This requires the heap. 
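+/// For example (illustrative), booting with a cmdline of
+/// `info,kfs_kernel::syscalls=trace` keeps the default level at info while
+/// tracing only the syscalls module.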
pub fn init() { let logger = LOGGER.r#try().expect("early_init to be called before init"); - let cmdline = get_boot_information().command_line_tag().unwrap().command_line(); - let newfilter = filter::Builder::new().parse(cmdline).build(); + let newfilter = filter::Builder::new().parse(get_cmdline()).build(); *logger.filter.write() = newfilter; } From 9d8c5f98aa3a2b98483ba5e57777760127f4b998 Mon Sep 17 00:00:00 2001 From: roblabla Date: Wed, 30 Jan 2019 18:03:49 +0000 Subject: [PATCH 06/15] Move interrupts to arch::i386 and syscalls to root The syscall module is moved to the root, making documentation better (syscalls is right on kfs_kernel project page). Fixes #54. The rest of the interrupts module is fully arch-specific, so it is moved under arch::i386. --- kernel/src/{ => arch/i386}/interrupts/irq.rs | 0 kernel/src/{ => arch/i386}/interrupts/mod.rs | 22 +++----------------- kernel/src/arch/i386/mod.rs | 1 + kernel/src/main.rs | 5 ++--- kernel/src/{interrupts => }/syscalls.rs | 6 ++++-- kernel/src/utils.rs | 19 +++++++++++++++++ 6 files changed, 29 insertions(+), 24 deletions(-) rename kernel/src/{ => arch/i386}/interrupts/irq.rs (100%) rename kernel/src/{ => arch/i386}/interrupts/mod.rs (96%) rename kernel/src/{interrupts => }/syscalls.rs (99%) diff --git a/kernel/src/interrupts/irq.rs b/kernel/src/arch/i386/interrupts/irq.rs similarity index 100% rename from kernel/src/interrupts/irq.rs rename to kernel/src/arch/i386/interrupts/irq.rs diff --git a/kernel/src/interrupts/mod.rs b/kernel/src/arch/i386/interrupts/mod.rs similarity index 96% rename from kernel/src/interrupts/mod.rs rename to kernel/src/arch/i386/interrupts/mod.rs index a34e75723..9b7610f71 100644 --- a/kernel/src/interrupts/mod.rs +++ b/kernel/src/arch/i386/interrupts/mod.rs @@ -12,32 +12,16 @@ use crate::paging::kernel_memory::get_kernel_memory; use crate::arch::i386::{TssStruct, PrivilegeLevel}; use crate::arch::i386::gdt; use crate::scheduler::get_current_thread; -use crate::process::{ProcessStruct, ThreadState}; -use crate::sync::SpinLockIRQ; -use core::sync::atomic::Ordering; +use crate::process::ProcessStruct; use core::fmt::Arguments; use crate::sync::SpinLock; use crate::devices::pic; use crate::scheduler; +use crate::syscalls; +use crate::utils::check_thread_killed; mod irq; -mod syscalls; - -/// Checks if our thread was killed, in which case unschedule ourselves. -/// -/// # Note -/// -/// As this function will be the last that will be called by a thread before dying, -/// caller must make sure all of its scope variables are ok to be leaked. -pub fn check_thread_killed() { - if scheduler::get_current_thread().state.load(Ordering::SeqCst) == ThreadState::Killed { - let lock = SpinLockIRQ::new(()); - loop { // in case of spurious wakeups - let _ = scheduler::unschedule(&lock, lock.lock()); - } - } -} /// Panics with an informative message. fn panic_on_exception(exception_string: Arguments<'_>, exception_stack_frame: &ExceptionStackFrame) -> ! 
 {
diff --git a/kernel/src/arch/i386/mod.rs b/kernel/src/arch/i386/mod.rs
index 2bdffbc4f..e52e82046 100644
--- a/kernel/src/arch/i386/mod.rs
+++ b/kernel/src/arch/i386/mod.rs
@@ -7,6 +7,7 @@
 use alloc::boxed::Box;
 use core::ops::{Deref, DerefMut};
 
+pub mod interrupts;
 #[macro_use]
 pub mod registers;
 pub mod stack;
diff --git a/kernel/src/main.rs b/kernel/src/main.rs
index d974a5bbf..76fd48cd8 100644
--- a/kernel/src/main.rs
+++ b/kernel/src/main.rs
@@ -54,9 +54,8 @@ pub mod paging;
 pub mod event;
 pub mod error;
 pub mod log_impl;
-pub mod interrupts;
 pub mod frame_allocator;
-
+pub mod syscalls;
 pub mod heap_allocator;
 pub mod devices;
 pub mod sync;
@@ -222,7 +221,7 @@ pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! {
     info!("Enabling interrupts");
-    unsafe { interrupts::init(); }
+    unsafe { arch::i386::interrupts::init(); }
 
     //info!("Disable timer interrupt");
     //devices::pic::get().mask(0);
diff --git a/kernel/src/interrupts/syscalls.rs b/kernel/src/syscalls.rs
similarity index 99%
rename from kernel/src/interrupts/syscalls.rs
rename to kernel/src/syscalls.rs
index 9fdd5da20..e595487c3 100644
--- a/kernel/src/interrupts/syscalls.rs
+++ b/kernel/src/syscalls.rs
@@ -1,6 +1,5 @@
 //! Syscall implementations
 
-use crate::arch::i386;
 use crate::mem::{VirtualAddress, PhysicalAddress};
 use crate::mem::{UserSpacePtr, UserSpacePtrMut};
 use crate::paging::{MappingAccessRights, mapping::MappingType};
@@ -14,7 +13,7 @@ use alloc::string::String;
 use alloc::sync::Arc;
 use alloc::vec::Vec;
 use crate::ipc;
-use super::check_thread_killed;
+use crate::utils::check_thread_killed;
 use crate::error::UserspaceError;
 use kfs_libkern::{nr, SYSCALL_NAMES, MemoryInfo, MemoryAttributes, MemoryPermissions};
 use bit_field::BitArray;
@@ -41,7 +40,9 @@ fn set_heap_size(new_size: usize) -> Result<usize, UserspaceError> {
 }
 
 /// Maps the vga frame buffer mmio in userspace memory
+#[cfg(target_arch = "x86")] // Temporary.
 fn map_framebuffer() -> Result<(usize, usize, usize, usize), UserspaceError> {
+    use crate::arch::i386;
     let tag = i386::multiboot::get_boot_information().framebuffer_info_tag()
         .expect("Framebuffer to be provided");
     let framebuffer_size = tag.framebuffer_bpp() as usize
@@ -613,6 +614,7 @@ pub extern fn syscall_handler_inner(registers: &mut Registers) {
         (true, nr::ConnectToPort) => registers.apply1(connect_to_port(x0 as _)),
 
         // KFS extensions
+        #[cfg(target_arch = "x86")]
         (true, nr::MapFramebuffer) => registers.apply4(map_framebuffer()),
 
         // Unknown/unauthorized syscall.
diff --git a/kernel/src/utils.rs b/kernel/src/utils.rs
index f12425225..22c83f63f 100644
--- a/kernel/src/utils.rs
+++ b/kernel/src/utils.rs
@@ -4,6 +4,10 @@ use kfs_libutils;
 pub use self::kfs_libutils::*;
 pub use crate::checks::*;
 use crate::error::KernelError;
+use crate::scheduler;
+use crate::sync::SpinLockIRQ;
+use crate::process::ThreadState;
+use core::sync::atomic::Ordering;
 
 /// A trait for things that can be splitted in two parts
 pub trait Splittable where Self: Sized {
@@ -35,3 +39,18 @@ pub trait Splittable where Self: Sized {
         }
     }
 }
+
+/// Checks if our thread was killed, in which case unschedule ourselves.
+///
+/// # Note
+///
+/// As this function will be the last that will be called by a thread before dying,
+/// caller must make sure all of its scope variables are ok to be leaked.
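+///
+/// Illustrative call-site: syscall and IRQ return paths are expected to call
+/// this last, so a killed thread gets unscheduled instead of resuming
+/// userspace.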
+pub fn check_thread_killed() { + if scheduler::get_current_thread().state.load(Ordering::SeqCst) == ThreadState::Killed { + let lock = SpinLockIRQ::new(()); + loop { // in case of spurious wakeups + let _ = scheduler::unschedule(&lock, lock.lock()); + } + } +} From c6ad3a4b2a4f2990fcd463325a82f28e0305b41f Mon Sep 17 00:00:00 2001 From: roblabla Date: Wed, 30 Jan 2019 20:10:24 +0000 Subject: [PATCH 07/15] Kernel: Move startup functions to arch::i386 start and common_start are both architecture-specific, so move them under arch::i386. Furthermore, init the multiboot2 global as early as possible, and make frame_allocator take the multiboot2 information with `get_boot_information` function. --- kernel/src/arch/i386/mod.rs | 97 ++++++++++++++++++++++++++++-- kernel/src/frame_allocator/i386.rs | 4 +- kernel/src/main.rs | 85 -------------------------- 3 files changed, 93 insertions(+), 93 deletions(-) diff --git a/kernel/src/arch/i386/mod.rs b/kernel/src/arch/i386/mod.rs index e52e82046..58730179e 100644 --- a/kernel/src/arch/i386/mod.rs +++ b/kernel/src/arch/i386/mod.rs @@ -444,6 +444,87 @@ impl DerefMut for AlignedTssStruct { } } +// CRT0 here + +/// The entry point of our kernel. +/// +/// This function is jump'd into from the bootstrap code, which: +/// +/// * enabled paging, +/// * gave us a valid KernelStack, +/// * mapped grub's multiboot information structure in KernelLand (its address in $ebx), +/// +/// What we do is just bzero the .bss, and call a rust function, passing it the content of $ebx. +#[cfg(target_os = "none")] +#[no_mangle] +pub unsafe extern fn start() -> ! { + asm!(" + // Memset the bss. Hopefully memset doesn't actually use the bss... + mov eax, BSS_END + sub eax, BSS_START + push eax + push 0 + push BSS_START + call memset + add esp, 12 + + // Save multiboot infos addr present in ebx + push ebx + call $0" : : "i"(common_start as *const u8) : : "intel", "volatile"); + core::intrinsics::unreachable() +} + +/// CRT0 starts here. +/// +/// This function takes care of initializing the kernel, before calling the main function. +#[cfg(target_os = "none")] +extern "C" fn common_start(multiboot_info_addr: usize) -> ! { + use crate::devices::rs232::{SerialAttributes, SerialColor}; + use crate::arch::get_logger; + use core::fmt::Write; + + crate::log_impl::early_init(); + + // Say hello to the world + let _ = writeln!(get_logger(), "\n# Welcome to {}KFS{}!\n", + SerialAttributes::fg(SerialColor::LightCyan), + SerialAttributes::default()); + + // Parse the multiboot infos + info!("Parsing multiboot informations"); + let boot_info = unsafe { multiboot2::load(multiboot_info_addr) }; + crate::arch::i386::multiboot::init(boot_info); + + // Setup frame allocator + info!("Initializing frame allocator"); + crate::frame_allocator::init(); + + // Set up (read: inhibit) the GDT. + info!("Initializing gdt..."); + crate::arch::i386::gdt::init_gdt(); + + crate::log_impl::init(); + + info!("Initializing PIT"); + unsafe { crate::devices::pit::init_channel_0() }; + + info!("Enabling interrupts"); + unsafe { interrupts::init(); } + + info!("Becoming the first process"); + unsafe { crate::scheduler::create_first_process() }; + + info!("Calling main()"); + + crate::main(); + // Die ! + // We shouldn't reach this... 
+ loop { + #[cfg(target_os = "none")] + unsafe { asm!("HLT"); } + } +} + // START ARCH API HERE /// See [arch::stub::enable_interrupts] pub unsafe fn enable_interrupts() { @@ -451,19 +532,23 @@ pub unsafe fn enable_interrupts() { } /// See [arch::stub::disable_interrupts] -pub unsafe fn disable_interrupts() { +pub unsafe fn disable_interrupts() -> bool { let backup = instructions::interrupts::are_enabled(); instructions::interrupts::cli(); backup } /// See [arch::stub::get_cmdline] +/// +/// On i386, it will return the cmdline from the multiboot information. Before +/// multiboot is properly initialized, or if the commandline tag is missing from +/// the multiboot information structure, it will return the default value +/// "debug". pub fn get_cmdline() -> &'static str { - if let Some(cmdlinetag) = multiboot::get_boot_information().command_line_tag() { - cmdlinetag.command_line() - } else { - "debug" - } + multiboot::try_get_boot_information() + .and_then(|v| v.command_line_tag()) + .map(|v| v.command_line()) + .unwrap_or("debug") } /// See [arch::stub::get_logger] diff --git a/kernel/src/frame_allocator/i386.rs b/kernel/src/frame_allocator/i386.rs index c0004c100..a630bcf3c 100644 --- a/kernel/src/frame_allocator/i386.rs +++ b/kernel/src/frame_allocator/i386.rs @@ -15,7 +15,6 @@ use super::{PhysicalMemRegion, FrameAllocatorTrait, FrameAllocatorTraitPrivate}; use crate::paging::PAGE_SIZE; -use multiboot2::BootInformation; use crate::sync::SpinLock; use alloc::vec::Vec; use crate::utils::{check_aligned, check_nonzero_length}; @@ -302,7 +301,8 @@ impl FrameAllocatorTrait for FrameAllocator { /// Initialize the [FrameAllocator] by parsing the multiboot information /// and marking some memory areas as unusable #[cfg(not(test))] -pub fn init(boot_info: &BootInformation) { +pub fn init() { + let boot_info = crate::arch::i386::multiboot::get_boot_information(); let mut allocator = FRAME_ALLOCATOR.lock(); let memory_map_tag = boot_info.memory_map_tag() diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 76fd48cd8..59c325bc6 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -155,91 +155,6 @@ fn main() { } } -/// The entry point of our kernel. -/// -/// This function is jump'd into from the bootstrap code, which: -/// -/// * enabled paging, -/// * gave us a valid KernelStack, -/// * mapped grub's multiboot information structure in KernelLand (its address in $ebx), -/// -/// What we do is just bzero the .bss, and call a rust function, passing it the content of $ebx. -#[cfg(target_os = "none")] -#[no_mangle] -pub unsafe extern fn start() -> ! { - asm!(" - // Memset the bss. Hopefully memset doesn't actually use the bss... - mov eax, BSS_END - sub eax, BSS_START - push eax - push 0 - push BSS_START - call memset - add esp, 12 - - // Save multiboot infos addr present in ebx - push ebx - call common_start" : : : : "intel", "volatile"); - core::intrinsics::unreachable() -} - -/// CRT0 starts here. -/// -/// This function takes care of initializing the kernel, before calling the main function. -#[cfg(target_os = "none")] -#[no_mangle] -pub extern "C" fn common_start(multiboot_info_addr: usize) -> ! 
{ - use crate::devices::rs232::{SerialAttributes, SerialColor}; - - log_impl::early_init(); - - - let log = &mut devices::rs232::SerialLogger; - // Say hello to the world - let _ = writeln!(log, "\n# Welcome to {}KFS{}!\n", - SerialAttributes::fg(SerialColor::LightCyan), - SerialAttributes::default()); - - // Parse the multiboot infos - let boot_info = unsafe { multiboot2::load(multiboot_info_addr) }; - info!("Parsed multiboot informations"); - - // Setup frame allocator - frame_allocator::init(&boot_info); - info!("Initialized frame allocator"); - - // Set up (read: inhibit) the GDT. - info!("Initializing gdt..."); - crate::arch::i386::gdt::init_gdt(); - info!("Gdt initialized"); - - crate::arch::i386::multiboot::init(boot_info); - - log_impl::init(); - - unsafe { devices::pit::init_channel_0() }; - info!("Initialized PIT"); - - info!("Enabling interrupts"); - unsafe { arch::i386::interrupts::init(); } - - //info!("Disable timer interrupt"); - //devices::pic::get().mask(0); - - info!("Becoming the first process"); - unsafe { scheduler::create_first_process() }; - - info!("Calling main()"); - - main(); - // Die ! - // We shouldn't reach this... - loop { - #[cfg(target_os = "none")] - unsafe { asm!("HLT"); } - } -} - /// The exception handling personality function for use in the bootstrap. /// /// We have no exception handling in the kernel, so make it do nothing. From 515bff1b3635c80381a443fb0deaaa5c1e413c2f Mon Sep 17 00:00:00 2001 From: roblabla Date: Thu, 31 Jan 2019 15:52:02 +0000 Subject: [PATCH 08/15] Kernel: Use arch abstraction API in panic handling. Adds a new function: force_logger_unlock, which force-unlocks any Mutex that might be protecting the logger. This is only used in the panic implementation, in order to avoid deadlocking. Panic now uses get_logger in order to stay arch-generic. --- kernel/src/arch/i386/mod.rs | 6 ++++++ kernel/src/arch/mod.rs | 3 ++- kernel/src/arch/stub/mod.rs | 6 ++++++ kernel/src/main.rs | 13 ++++++------- 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/kernel/src/arch/i386/mod.rs b/kernel/src/arch/i386/mod.rs index 58730179e..adbea4bc2 100644 --- a/kernel/src/arch/i386/mod.rs +++ b/kernel/src/arch/i386/mod.rs @@ -558,3 +558,9 @@ pub fn get_logger() -> impl core::fmt::Write { use crate::devices::rs232::SerialLogger; SerialLogger } + +/// See [arch::stub::force_logger_unlock] +pub unsafe fn force_logger_unlock() { + use crate::devices::rs232::SerialLogger; + SerialLogger.force_unlock(); +} diff --git a/kernel/src/arch/mod.rs b/kernel/src/arch/mod.rs index dbfb6405e..0fa4bd4d1 100644 --- a/kernel/src/arch/mod.rs +++ b/kernel/src/arch/mod.rs @@ -22,5 +22,6 @@ cfg_if! { // Reexport public API pub use self::arch::{ - enable_interrupts, disable_interrupts, get_cmdline, get_logger + enable_interrupts, disable_interrupts, get_cmdline, get_logger, + force_logger_unlock }; diff --git a/kernel/src/arch/stub/mod.rs b/kernel/src/arch/stub/mod.rs index 3ce6a6c82..6be289acd 100644 --- a/kernel/src/arch/stub/mod.rs +++ b/kernel/src/arch/stub/mod.rs @@ -45,3 +45,9 @@ pub fn get_logger() -> impl core::fmt::Write { } EmptyLogger } + +/// Force unlocks any mutex that might be locking the Write implementation +/// returned by [get_logger]. This is only used by the panic handling, to ensure +/// we don't deadlock if we panic'd in the logging implementation. 
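+///
+/// The panic path is expected to use it roughly like this (sketch):
+///
+/// ```rust,ignore
+/// use core::fmt::Write;
+/// unsafe { force_logger_unlock(); }
+/// let _ = writeln!(get_logger(), "kernel panic");
+/// ```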
+pub unsafe fn force_logger_unlock() { +} diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 59c325bc6..422613349 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -179,18 +179,17 @@ fn main() { /// /// [dump_stack]: crate::stack::dump_stack unsafe fn do_panic(msg: core::fmt::Arguments<'_>, stackdump_source: Option) -> ! { + use crate::arch::{get_logger, force_logger_unlock}; // Disable interrupts forever! unsafe { sync::permanently_disable_interrupts(); } // Don't deadlock in the logger - unsafe { SerialLogger.force_unlock(); } + unsafe { force_logger_unlock(); } //todo: force unlock the KernelMemory lock // and also the process memory lock for userspace stack dumping (only if panic-on-excetpion ?). - use crate::devices::rs232::SerialLogger; - - let _ = writeln!(SerialLogger, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\ + let _ = writeln!(get_logger(), "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\ ! Panic! at the disco\n\ ! {}\n\ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", @@ -223,7 +222,7 @@ unsafe fn do_panic(msg: core::fmt::Arguments<'_>, stackdump_source: Option, stackdump_source: Option Date: Thu, 31 Jan 2019 17:31:17 +0000 Subject: [PATCH 09/15] Kernel: Use arch abstraction API for process switching Adds two new function and one structure to the arch abstraction API: - ThreadHardwareContext, contains some support fields for the process switching code. - process_switch: Executes another process. - prepare_for_first_schedule: Prepare a new thread to get scheduled/switched to. --- kernel/src/arch/i386/mod.rs | 2 ++ kernel/src/arch/i386/process_switch.rs | 2 +- kernel/src/arch/mod.rs | 3 ++- kernel/src/arch/stub/mod.rs | 28 ++++++++++++++++++++++++++ kernel/src/process.rs | 2 +- kernel/src/scheduler.rs | 4 ++-- 6 files changed, 36 insertions(+), 5 deletions(-) diff --git a/kernel/src/arch/i386/mod.rs b/kernel/src/arch/i386/mod.rs index adbea4bc2..9f67e972c 100644 --- a/kernel/src/arch/i386/mod.rs +++ b/kernel/src/arch/i386/mod.rs @@ -564,3 +564,5 @@ pub unsafe fn force_logger_unlock() { use crate::devices::rs232::SerialLogger; SerialLogger.force_unlock(); } + +pub use self::process_switch::{ThreadHardwareContext, process_switch, prepare_for_first_schedule}; diff --git a/kernel/src/arch/i386/process_switch.rs b/kernel/src/arch/i386/process_switch.rs index f8ef21cd1..a8bda0783 100644 --- a/kernel/src/arch/i386/process_switch.rs +++ b/kernel/src/arch/i386/process_switch.rs @@ -191,7 +191,7 @@ pub unsafe extern "C" fn process_switch(thread_b: Arc, thread_curr /// Prepares the thread for its first schedule by writing default values at the start of the /// stack that will be loaded in the registers in schedule-in. -/// See process_switch() documentation for more details. +/// See [arch::stub::prepare_for_first_schedule] and [process_switch] documentation for more details. /// /// # Safety /// diff --git a/kernel/src/arch/mod.rs b/kernel/src/arch/mod.rs index 0fa4bd4d1..71271238b 100644 --- a/kernel/src/arch/mod.rs +++ b/kernel/src/arch/mod.rs @@ -23,5 +23,6 @@ cfg_if! { // Reexport public API pub use self::arch::{ enable_interrupts, disable_interrupts, get_cmdline, get_logger, - force_logger_unlock + force_logger_unlock, ThreadHardwareContext, process_switch, + prepare_for_first_schedule }; diff --git a/kernel/src/arch/stub/mod.rs b/kernel/src/arch/stub/mod.rs index 6be289acd..4720cad93 100644 --- a/kernel/src/arch/stub/mod.rs +++ b/kernel/src/arch/stub/mod.rs @@ -5,6 +5,9 @@ //! implementation of the arch-specific component, and to provide the test builds //! 
with a simple implementation. +use alloc::sync::Arc; +use crate::process::ThreadStruct; + /// Enable interruptions. After calling this function, hardware should call /// [crate::event::dispatch_event] whenever it receives an interruption. pub unsafe fn enable_interrupts() { @@ -51,3 +54,28 @@ pub fn get_logger() -> impl core::fmt::Write { /// we don't deadlock if we panic'd in the logging implementation. pub unsafe fn force_logger_unlock() { } + +/// The hardware context of a paused thread. It contains just enough registers to get the thread +/// running again. +/// +/// All other registers are to be saved on the thread's kernel stack before scheduling, +/// and restored right after re-schedule. +/// +/// Stored in the ThreadStruct of every thread. +#[derive(Debug, Default)] +pub struct ThreadHardwareContext; + +pub unsafe extern "C" fn process_switch(_thread_b: Arc, _thread_current: Arc) -> Arc { + unimplemented!("Can't process switch on stub architecture") +} + +/// Prepares the thread for its first schedule, prepopulating the hwcontext and +/// setting up the necessary environment for [process_switch] to work correctly. +/// This can involve pushing values on the stack, setting specific registers, +/// etc... See [process_switch] documentation for more details. +/// +/// # Safety +/// +/// UB if called on a thread after it was scheduled for the first time. +pub unsafe fn prepare_for_first_schedule(_t: &ThreadStruct, _entrypoint: usize, _userspace_stack: usize) { +} diff --git a/kernel/src/process.rs b/kernel/src/process.rs index 254b2dcdc..2420651f0 100644 --- a/kernel/src/process.rs +++ b/kernel/src/process.rs @@ -1,7 +1,7 @@ //! Process use crate::stack::KernelStack; -use crate::arch::i386::process_switch::*; +use crate::arch::{ThreadHardwareContext, prepare_for_first_schedule}; use crate::paging::process_memory::ProcessMemory; use alloc::boxed::Box; use alloc::sync::{Arc, Weak}; diff --git a/kernel/src/scheduler.rs b/kernel/src/scheduler.rs index 8ae26437a..a57c245b7 100644 --- a/kernel/src/scheduler.rs +++ b/kernel/src/scheduler.rs @@ -5,7 +5,7 @@ use alloc::vec::Vec; use core::mem; use crate::process::{ProcessStruct, ThreadStruct, ThreadState}; -use crate::arch::i386::process_switch::process_switch; +use crate::arch::process_switch; use crate::sync::{Lock, SpinLockIRQ, SpinLockIRQGuard}; use core::sync::atomic::Ordering; use crate::error::{UserspaceError}; @@ -311,7 +311,7 @@ pub fn scheduler_first_schedule(current_thread: Arc, unsafe { // this is a new process, no SpinLockIRQ is held - crate::arch::i386::instructions::interrupts::sti(); + crate::arch::enable_interrupts(); } jump_to_entrypoint() From 4a07c7fb00509a32e3b0408ed293cb500f32436a Mon Sep 17 00:00:00 2001 From: roblabla Date: Thu, 21 Feb 2019 19:06:44 +0000 Subject: [PATCH 10/15] Kernel: Get kernel builtins through arch abstraction API Adds a new trait: `elf_loader::Module`, representing a kernel builtin that will be loaded when booting the kernel. Adds a new function to arch abstraction API: get_modules(). This function returns an iterator of `Module`s. All of those modules are to be started on kernel boot. 
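
As an illustration, boot code can stay arch-agnostic by only going
through the trait and the iterator. A sketch mirroring the new loop in
main() below:

    for module in crate::arch::get_modules() {
        info!("Loading {}", module.name());
        let mapped = elf_loader::map_module(&module);
        // ... build a process from `mapped` ...
    }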
--- kernel/src/arch/i386/mod.rs | 21 +++++++++++++++++++ kernel/src/arch/mod.rs | 4 +++- kernel/src/arch/stub/mod.rs | 24 ++++++++++++++++++++++ kernel/src/elf_loader.rs | 40 ++++++++++++++++++++++--------------- kernel/src/main.rs | 18 +++++++++-------- 5 files changed, 82 insertions(+), 25 deletions(-) diff --git a/kernel/src/arch/i386/mod.rs b/kernel/src/arch/i386/mod.rs index 9f67e972c..1b526e199 100644 --- a/kernel/src/arch/i386/mod.rs +++ b/kernel/src/arch/i386/mod.rs @@ -6,6 +6,7 @@ use alloc::boxed::Box; use core::ops::{Deref, DerefMut}; +use crate::mem::PhysicalAddress; pub mod interrupts; #[macro_use] @@ -565,4 +566,24 @@ pub unsafe fn force_logger_unlock() { SerialLogger.force_unlock(); } +/// See [arch::stub::get_modules] +pub fn get_modules() -> impl Iterator { + impl crate::elf_loader::Module for &multiboot2::ModuleTag { + fn start_address(&self) -> PhysicalAddress { + PhysicalAddress(multiboot2::ModuleTag::start_address(self) as usize) + } + fn end_address(&self) -> PhysicalAddress { + PhysicalAddress(multiboot2::ModuleTag::end_address(self) as usize) + } + fn name(&self) -> &str { + multiboot2::ModuleTag::name(self) + } + } + + multiboot::try_get_boot_information() + .into_iter() + .map(|v| v.module_tags().skip(1)) + .flatten() +} + pub use self::process_switch::{ThreadHardwareContext, process_switch, prepare_for_first_schedule}; diff --git a/kernel/src/arch/mod.rs b/kernel/src/arch/mod.rs index 71271238b..a2eba90bc 100644 --- a/kernel/src/arch/mod.rs +++ b/kernel/src/arch/mod.rs @@ -24,5 +24,7 @@ cfg_if! { pub use self::arch::{ enable_interrupts, disable_interrupts, get_cmdline, get_logger, force_logger_unlock, ThreadHardwareContext, process_switch, - prepare_for_first_schedule + prepare_for_first_schedule, + + get_modules }; diff --git a/kernel/src/arch/stub/mod.rs b/kernel/src/arch/stub/mod.rs index 4720cad93..02506a775 100644 --- a/kernel/src/arch/stub/mod.rs +++ b/kernel/src/arch/stub/mod.rs @@ -6,6 +6,8 @@ //! with a simple implementation. use alloc::sync::Arc; + +use crate::mem::PhysicalAddress; use crate::process::ThreadStruct; /// Enable interruptions. After calling this function, hardware should call @@ -79,3 +81,25 @@ pub unsafe extern "C" fn process_switch(_thread_b: Arc, _thread_cu /// UB if called on a thread after it was scheduled for the first time. pub unsafe fn prepare_for_first_schedule(_t: &ThreadStruct, _entrypoint: usize, _userspace_stack: usize) { } + +/// Get a list of Kernel Internal Processes to load. These are processes +/// typically bundled with the kernel that are the basic necessary processes to +/// load other processes from the filesystem. These are typically FS, Loader and +/// Boot. +pub fn get_modules() -> impl Iterator { + #[doc(hidden)] + #[derive(Debug)] + struct EmptyModule; + impl crate::elf_loader::Module for EmptyModule { + fn start_address(&self) -> PhysicalAddress { + unreachable!() + } + fn end_address(&self) -> PhysicalAddress { + unreachable!() + } + fn name(&self) -> &str { + "Empty Module" + } + } + core::iter::empty::() +} diff --git a/kernel/src/elf_loader.rs b/kernel/src/elf_loader.rs index 51e7722a2..843588e85 100644 --- a/kernel/src/elf_loader.rs +++ b/kernel/src/elf_loader.rs @@ -13,7 +13,6 @@ //! the built-ins to the kernel, and load them with a primitive ELF loader. This loader //! 
does not do any dynamic loading or provide ASLR (though that is up for change)
 
-use multiboot2::ModuleTag;
 use core::slice;
 use xmas_elf::ElfFile;
 use xmas_elf::program::{ProgramHeader, Type::Load, SegmentData};
@@ -23,9 +22,22 @@ use crate::frame_allocator::PhysicalMemRegion;
 use crate::utils::{self, align_up};
 use crate::error::KernelError;
 
-/// Represents a grub module once mapped in kernel memory
+/// Abstract representation of a Kernel Internal Process (KIP). Depending on the
+/// platform, KIPs may be passed through different mechanisms. For instance, on
+/// IBM/PCs, they might be passed as GRUB modules, whereas on the Jetson TX1, they
+/// might be passed as an INI1. This trait abstracts over those differences.
+pub trait Module {
+    /// Physical address defining the start of the KIP.
+    fn start_address(&self) -> PhysicalAddress;
+    /// Physical address defining the end of the KIP.
+    fn end_address(&self) -> PhysicalAddress;
+    /// Name of the KIP. Used as a process name.
+    fn name(&self) -> &str;
+}
+
+/// Represents a [Module] once mapped in kernel memory
 #[derive(Debug)]
-pub struct MappedGrubModule<'a> {
+pub struct MappedModule<'a> {
     /// The address of the mapping, in KernelLand.
     pub mapping_addr: VirtualAddress,
     /// The start of the module in the mapping, if it was not page aligned.
@@ -37,14 +49,10 @@ pub struct MappedGrubModule<'a> {
 }
 
 /// Maps a grub module, which already lives in reserved physical memory, into the KernelLand.
-///
-/// # Error:
-///
-/// * VirtualMemoryExhaustion: cannot find virtual memory where to map it.
-pub fn map_grub_module(module: &ModuleTag) -> Result<MappedGrubModule<'_>, KernelError> {
-    let start_address_aligned = PhysicalAddress(utils::align_down(module.start_address() as usize, PAGE_SIZE));
+pub fn map_module(module: &impl Module) -> MappedModule<'_> {
+    let start_address_aligned = module.start_address().floor();
     // Use start_address_aligned to calculate the number of pages, to avoid an off-by-one.
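+    // Worked example (sketch): a module spanning 0x1fff..0x3001 floors to a
+    // base of 0x1000; aligning the length (0x3001 - 0x1000 = 0x2001) up to
+    // PAGE_SIZE maps 0x1000..0x4000, i.e. the 3 pages the module touches.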
- let module_len_aligned = utils::align_up(module.end_address() as usize - start_address_aligned.addr(), PAGE_SIZE); + let module_len_aligned = utils::align_up(module.end_address().addr() - start_address_aligned.addr(), PAGE_SIZE); let mapping_addr = { let mut page_table = get_kernel_memory(); @@ -60,15 +68,15 @@ pub fn map_grub_module(module: &ModuleTag) -> Result, Kerne }; // the module offset in the mapping - let start = mapping_addr + (module.start_address() as usize % PAGE_SIZE); - let len = module.end_address() as usize - module.start_address() as usize; + let start = mapping_addr + (start_address_aligned - module.start_address()); + let len = module.end_address() - module.start_address(); // try parsing it as an elf let elf = ElfFile::new(unsafe { slice::from_raw_parts(start.addr() as *const u8, len) }); - Ok(MappedGrubModule { + MappedModule { mapping_addr, start, len, @@ -76,7 +84,7 @@ pub fn map_grub_module(module: &ModuleTag) -> Result, Kerne }) } -impl<'a> Drop for MappedGrubModule<'a> { +impl<'a> Drop for MappedModule<'a> { /// Unmap the module, but do not deallocate physical memory fn drop(&mut self) { get_kernel_memory().unmap_no_dealloc( self.mapping_addr, @@ -87,7 +95,7 @@ impl<'a> Drop for MappedGrubModule<'a> { /// Gets the desired kernel access controls for a process based on the /// .kernel_caps section in its elf -pub fn get_kacs<'a>(module: &'a MappedGrubModule<'_>) -> Option<&'a [u8]> { +pub fn get_kacs<'a>(module: &'a MappedModule<'_>) -> Option<&'a [u8]> { let elf = module.elf.as_ref().expect("Failed parsing multiboot module as elf"); elf.find_section_by_name(".kernel_caps") @@ -96,7 +104,7 @@ pub fn get_kacs<'a>(module: &'a MappedGrubModule<'_>) -> Option<&'a [u8]> { /// Loads the given kernel built-in into the given page table. /// Returns address of entry point -pub fn load_builtin(process_memory: &mut ProcessMemory, module: &MappedGrubModule<'_>) -> usize { +pub fn load_builtin(process_memory: &mut ProcessMemory, module: &MappedModule<'_>) -> usize { let elf = module.elf.as_ref().expect("Failed parsing multiboot module as elf"); // load all segments into the page_table we had above diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 422613349..8fb2f5678 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -83,6 +83,7 @@ use crate::arch::i386::stack; use crate::paging::{PAGE_SIZE, MappingAccessRights}; use crate::mem::VirtualAddress; use crate::process::{ProcessStruct, ThreadStruct}; +use crate::elf_loader::Module; /// Forces a double fault by stack overflowing. /// @@ -125,10 +126,9 @@ unsafe fn force_double_fault() { /// From now on, the kernel's only job will be to respond to IRQs and serve syscalls. 
fn main() { info!("Loading all the init processes"); - for module in crate::arch::i386::multiboot::get_boot_information().module_tags().skip(1) { + for module in crate::arch::get_modules() { info!("Loading {}", module.name()); - let mapped_module = elf_loader::map_grub_module(module) - .unwrap_or_else(|_| panic!("Unable to find available memory for module {}", module.name())); + let mapped_module = elf_loader::map_module(&module); let proc = ProcessStruct::new(String::from(module.name()), elf_loader::get_kacs(&mapped_module)).unwrap(); let (ep, sp) = { let mut pmemlock = proc.pmemory.lock(); @@ -200,14 +200,16 @@ unsafe fn do_panic(msg: core::fmt::Arguments<'_>, stackdump_source: Option(mapped_kernel_elf: &'a Option>) -> Option<(&'a ElfFile<'a>, &'a[Entry32])> { + fn get_symbols<'a>(mapped_kernel_elf: &'a Option>) -> Option<(&'a ElfFile<'a>, &'a[Entry32])> { let module = mapped_kernel_elf.as_ref()?; let elf = module.elf.as_ref().ok()?; let data = elf.find_section_by_name(".symtab")? From 2fe3fdd8208245b96fc6648bfa74265e34090c2e Mon Sep 17 00:00:00 2001 From: roblabla Date: Fri, 1 Feb 2019 17:50:02 +0000 Subject: [PATCH 11/15] Kernel: Move stack allocation/dumping in arch-generic API Adds a new type in arch-generic API, KernelStack, and a new function, dump_stack. KernelStack represents the Kernel Stack used to handle interruptions associated with a ThreadStruct. Having KernelStack in the arch-generic API is a bit weird. The concept is inherently arch-dependent... --- kernel/src/arch/i386/interrupts/mod.rs | 2 +- kernel/src/arch/i386/mod.rs | 1 + kernel/src/arch/mod.rs | 2 + kernel/src/arch/stub/mod.rs | 63 ++++++++++++++++++++++++++ kernel/src/main.rs | 10 ++-- kernel/src/process.rs | 2 +- 6 files changed, 73 insertions(+), 7 deletions(-) diff --git a/kernel/src/arch/i386/interrupts/mod.rs b/kernel/src/arch/i386/interrupts/mod.rs index 9b7610f71..0fbb06fae 100644 --- a/kernel/src/arch/i386/interrupts/mod.rs +++ b/kernel/src/arch/i386/interrupts/mod.rs @@ -166,7 +166,7 @@ fn double_fault_handler() { tss_main.eip, tss_main.cr3, tss_main.eax, tss_main.ebx, tss_main.ecx, tss_main.edx, tss_main.esi, tss_main.edi, tss_main.esp, tss_main.ebp), - Some(crate::stack::StackDumpSource::new( + Some(super::stack::StackDumpSource::new( tss_main.esp as usize, tss_main.ebp as usize, tss_main.eip as usize ))); } else { diff --git a/kernel/src/arch/i386/mod.rs b/kernel/src/arch/i386/mod.rs index 1b526e199..584394453 100644 --- a/kernel/src/arch/i386/mod.rs +++ b/kernel/src/arch/i386/mod.rs @@ -587,3 +587,4 @@ pub fn get_modules() -> impl Iterator { } pub use self::process_switch::{ThreadHardwareContext, process_switch, prepare_for_first_schedule}; +pub use self::stack::{KernelStack, StackDumpSource, dump_stack}; diff --git a/kernel/src/arch/mod.rs b/kernel/src/arch/mod.rs index a2eba90bc..934654e1a 100644 --- a/kernel/src/arch/mod.rs +++ b/kernel/src/arch/mod.rs @@ -26,5 +26,7 @@ pub use self::arch::{ force_logger_unlock, ThreadHardwareContext, process_switch, prepare_for_first_schedule, + KernelStack, StackDumpSource, dump_stack, + get_modules }; diff --git a/kernel/src/arch/stub/mod.rs b/kernel/src/arch/stub/mod.rs index 02506a775..9b9a8d27a 100644 --- a/kernel/src/arch/stub/mod.rs +++ b/kernel/src/arch/stub/mod.rs @@ -6,9 +6,12 @@ //! with a simple implementation. use alloc::sync::Arc; +use xmas_elf::ElfFile; +use xmas_elf::symbol_table::Entry32; use crate::mem::PhysicalAddress; use crate::process::ThreadStruct; +use crate::error::KernelError; /// Enable interruptions. 
After calling this function, hardware should call /// [crate::event::dispatch_event] whenever it receives an interruption. @@ -103,3 +106,63 @@ pub fn get_modules() -> impl Iterator { } core::iter::empty::() } + +/// A structure representing a kernel stack. Allows abstracting away allocation +/// and dumping of the Kernel Stack. +/// +/// A KernelStack is switched to when the kernel needs to handle an interrupt or +/// exception while userspace is executing. To avoid leaking kernel memory to +/// userspace, the stack is switched to the KernelStack. +#[derive(Debug)] +pub struct KernelStack; + +impl KernelStack { + /// Allocate a new KernelStack for a new [ThreadStruct]. This is used by + /// [ThreadStruct::new] to create the new KernelStack associated with this + /// thread. + pub fn allocate_stack() -> Result { + unimplemented!() + } + + /// Get the current kernel stack. Used by [ThreadStruct::create_first_thread] + /// to create the first thread's KernelStack. + /// + /// # Safety + /// + /// Unsafe because it creates duplicates of the stack structure, + /// whose only owner should be the ProcessStruct it belongs to. + /// This enables having several mut references pointing to the same underlying memory. + /// Caller has to make sure no references to the stack exists when calling this function. + /// + /// The safe method of getting the stack is by getting current [`ProcessStruct`], *lock it*, + /// and use its `pstack`. + /// + /// [ThreadStruct::create_first_thread]: crate::process::ThreadStruct::create_first_thread + pub unsafe fn get_current_stack() -> KernelStack { + unimplemented!() + } + + /// Dumps the stack, displaying it in a frame-by-frame format. + /// + /// It can accepts an elf symbols which will be used to enhance the stack dump. + pub fn dump_current_stack<'a>(_elf_symbols: Option<(&ElfFile<'a>, &'a [Entry32])>) { + } +} + +/// A structure representing the CPU stack state at a given execution point. From +/// this state, we can generate a meaningful stack trace. +pub struct StackDumpSource; + +/// Dumps the stack from the given information, displaying it in a frame-by-frame +/// format. +/// +/// # Safety +/// +/// This function checks whether the stack is properly mapped before attempting to access it. +/// It then creates a &[u8] from what could be a shared resource. +/// +/// The caller must make sure the mapping pointed to by `source` cannot be modified while this +/// function is at work. This will often mean checking that the thread whose stack we're dumping +/// is stopped and will remain unscheduled at least until this function returns. +pub unsafe fn dump_stack<'a>(_source: &StackDumpSource, _elf_symbols: Option<(&ElfFile<'a>, &'a [Entry32])>) { +} diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 8fb2f5678..90db50c41 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -79,7 +79,7 @@ pub use crate::heap_allocator::rust_oom; #[global_allocator] static ALLOCATOR: heap_allocator::Allocator = heap_allocator::Allocator::new(); -use crate::arch::i386::stack; +use crate::arch::{StackDumpSource, KernelStack, dump_stack}; use crate::paging::{PAGE_SIZE, MappingAccessRights}; use crate::mem::VirtualAddress; use crate::process::{ProcessStruct, ThreadStruct}; @@ -177,8 +177,8 @@ fn main() { /// /// Note that if `None` is passed, this function is safe. /// -/// [dump_stack]: crate::stack::dump_stack -unsafe fn do_panic(msg: core::fmt::Arguments<'_>, stackdump_source: Option) -> ! 
{ +/// [dump_stack]: crate::arch::stub::dump_stack +unsafe fn do_panic(msg: core::fmt::Arguments<'_>, stackdump_source: Option) -> ! { use crate::arch::{get_logger, force_logger_unlock}; // Disable interrupts forever! @@ -231,10 +231,10 @@ unsafe fn do_panic(msg: core::fmt::Arguments<'_>, stackdump_source: Option Date: Wed, 20 Feb 2019 21:27:20 +0000 Subject: [PATCH 12/15] set_bits_area improvements A few simple improvements to set_bits_area: - It is now available to both BitField and BitArray - It now uses RangeBounds, allowing the use of inclusive ranges, partial ranges, and full ranges. - It now has tests. --- libutils/src/lib.rs | 109 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 105 insertions(+), 4 deletions(-) diff --git a/libutils/src/lib.rs b/libutils/src/lib.rs index 85bb536e9..efacba93b 100644 --- a/libutils/src/lib.rs +++ b/libutils/src/lib.rs @@ -21,7 +21,7 @@ use num_traits::Num; -use core::ops::{Not, BitAnd}; +use core::ops::{Not, BitAnd, Bound, RangeBounds}; use core::fmt::Write; pub mod io; @@ -148,18 +148,55 @@ pub fn print_hexdump_as_if_at_addr(f: &mut T, mem: &[u8], display_addr } } -/// Extension of the [BitField] trait, that adds the `set_bits_area` function. +/// Extension of the [BitArray] trait, that adds the `set_bits_area` function. /// /// [BitField]: ::bit_field::BitField pub trait BitArrayExt: ::bit_field::BitArray { /// Sets a range of bits to `value` in the BitField. - fn set_bits_area(&mut self, range: ::core::ops::Range, value: bool) { - for i in range { + fn set_bits_area>(&mut self, range: T, value: bool) { + let start = match range.start_bound() { + Bound::Unbounded => 0, + Bound::Included(b) => *b, + Bound::Excluded(_b) => unreachable!("Excluded in start_bound"), + }; + let end = match range.end_bound() { + Bound::Unbounded => self.bit_length() - 1, + Bound::Included(b) => *b, + // If 0 is excluded, then the range is empty + Bound::Excluded(0) => return, + Bound::Excluded(b) => *b - 1, + }; + for i in start..=end { self.set_bit(i, value); } } } +/// Extension of the [BitField] trait, that adds the `set_bits_area` function. +/// +/// [BitField]: ::bit_field::BitField +pub trait BitFieldExt: ::bit_field::BitField { + /// Sets a range of bits to `value` in the BitField. 
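+    ///
+    /// Accepts any kind of range, so inclusive, partial and full ranges all
+    /// work. For example (sketch):
+    ///
+    /// ```rust,ignore
+    /// let mut x = 0u32;
+    /// x.set_bits_area(4..=7, true);  // set bits 4 through 7
+    /// assert_eq!(x, 0xF0);
+    /// x.set_bits_area(.., false);    // clear the whole field
+    /// assert_eq!(x, 0);
+    /// ```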
+ fn set_bits_area>(&mut self, range: T, value: bool) { + let start = match range.start_bound() { + Bound::Unbounded => 0, + Bound::Included(b) => *b, + Bound::Excluded(_b) => unreachable!("Excluded in start_bound"), + }; + let end = match range.end_bound() { + Bound::Unbounded => Self::bit_length() - 1, + Bound::Included(b) => *b, + // If 0 is excluded, then the range is empty + Bound::Excluded(0) => return, + Bound::Excluded(b) => *b - 1, + }; + for i in start..=end { + self.set_bit(i, value); + } + } +} + +impl BitFieldExt for T where T: bit_field::BitField {} impl BitArrayExt for T where T: ::bit_field::BitArray {} // We could have made a generic implementation of this two functions working for either 1 or 0, @@ -226,3 +263,67 @@ pub fn bit_array_first_count_one(bitarray: &[u8], count: usize) -> Option None } +#[cfg(test)] +mod test { + use crate::BitArrayExt; + + #[test] + fn test_set_bits_area_array_unbounded() { + let mut arr = [0u32; 4]; + + arr.set_bits_area(.., true); + assert_eq!(arr, [0xFFFFFFFF; 4]); + + arr.set_bits_area(.., false); + assert_eq!(arr, [0; 4]); + } + + #[test] + fn test_set_bits_area_array_bounded() { + let mut arr = [0u32; 4]; + + arr.set_bits_area(0..4, true); + assert_eq!(arr, [0xF, 0, 0, 0]); + + arr.set_bits_area(32..33, true); + assert_eq!(arr, [0xF, 1, 0, 0]); + + let bit_len = arr.len() * core::mem::size_of::() * 8; + arr.set_bits_area(bit_len - 1..bit_len, true); + } + + #[test] + #[should_panic] + fn test_set_bits_area_array_bounded_panics_oob() { + let mut arr = [0u32; 4]; + let bit_len = arr.len() * core::mem::size_of::() * 8; + arr.set_bits_area(bit_len..bit_len + 1, true); + } + + #[test] + fn test_set_bits_area_array_left_right_bounds() { + let mut arr = [0u32; 4]; + + // check right-bounded + // check setting last bit + let len = arr.len(); + arr.set_bits_area(..len * core::mem::size_of::() * 8, true); + assert_eq!(arr, [0xFFFFFFFF; 4]); + + // check left-bounded + arr.set_bits_area(len * core::mem::size_of::() * 8 - 1.., false); + assert_eq!(arr, [0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF]); + } + + #[test] + fn test_set_bits_area_array_inclusive() { + let mut arr = [0u32; 4]; + + arr.set_bits_area(0..=0, true); + assert_eq!(arr, [1, 0, 0, 0]); + + let bit_len = arr.len() * core::mem::size_of::() * 8; + arr.set_bits_area(bit_len - 1..=bit_len - 1, true); + assert_eq!(arr, [1, 0, 0, 0x80000000]); + } +} From be937dfd3395e2b219ccdec53c7b24f5013fb9ad Mon Sep 17 00:00:00 2001 From: roblabla Date: Wed, 20 Feb 2019 22:28:07 +0000 Subject: [PATCH 13/15] Kernel: new PageTable for_each_region function for_each_region is similar to for_each_entry, but runs for each contiguous region in the same "state". It is effectively the logic used by dump_kernelland_state, ripped in a generic function. --- kernel/src/paging/hierarchical_table.rs | 2 +- kernel/src/paging/kernel_memory.rs | 99 +++++++++++++------------ 2 files changed, 51 insertions(+), 50 deletions(-) diff --git a/kernel/src/paging/hierarchical_table.rs b/kernel/src/paging/hierarchical_table.rs index 0ce027b7e..380a1eeb9 100644 --- a/kernel/src/paging/hierarchical_table.rs +++ b/kernel/src/paging/hierarchical_table.rs @@ -20,7 +20,7 @@ use core::iter::{Iterator, Peekable}; /// /// PageState is generic over various kind of Present states, similar to the /// Option type. -#[derive(Debug)] +#[derive(Debug, Clone, Copy)] pub enum PageState { /// Available, aka unused. /// Will page fault on use. 
diff --git a/kernel/src/paging/kernel_memory.rs b/kernel/src/paging/kernel_memory.rs index 71bc80f48..a0e381861 100644 --- a/kernel/src/paging/kernel_memory.rs +++ b/kernel/src/paging/kernel_memory.rs @@ -27,8 +27,7 @@ use super::arch::{PAGE_SIZE, ActiveHierarchy}; use super::hierarchical_table::{TableHierarchy, PageState}; use super::MappingAccessRights; use crate::mem::{VirtualAddress, PhysicalAddress}; -use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator, FrameAllocatorTrait, - mark_frame_bootstrap_allocated}; +use crate::frame_allocator::{PhysicalMemRegion, InternalFrameAllocator, FrameAllocator}; use crate::sync::{Mutex, MutexGuard}; use crate::error::KernelError; use failure::Backtrace; @@ -265,14 +264,13 @@ impl KernelMemory { /// # Panic /// /// Panics if it tries to overwrite an existing reservation - pub fn reserve_kernel_land_frames(&mut self) { - self.tables.for_every_entry(KernelLand::start_addr(), + pub fn reserve_kernel_land_frames(&mut self, allocator: &InternalFrameAllocator) { + self.for_each_region(KernelLand::start_addr(), KernelLand::length() + RecursiveTablesLand::length(), - |entry_state, length| { - if let PageState::Present(mapped_frame) = entry_state { - for offset in (0..length).step_by(PAGE_SIZE) { - mark_frame_bootstrap_allocated(mapped_frame + offset) - } + |state, start_addr, end_addr| { + if let PageState::Present(mapped_frame) = state { + let length = end_addr - start_addr + 1; + allocator.mark_area_reserved(mapped_frame.addr(), (mapped_frame + length).addr()); } }); } @@ -282,75 +280,78 @@ impl KernelMemory { &mut self.tables } - /// Prints the state of the KernelLand by parsing the page tables. Used for debugging purposes. + /// Iterates over every physical region starting from address `start`, and + /// for `len` bytes, calling the callback `f` on each of them. + /// + /// A region is defined as a set of contiguous entries in the table that are + /// in the same state, and for Present pages, map contiguous physical pages. + /// + /// The callback takes the state, the start address and the end address. 
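+    ///
+    /// For example, logging every present mapping (a sketch of what
+    /// dump_kernelland_state does below; `mem` is a KernelMemory handle):
+    ///
+    /// ```rust,ignore
+    /// mem.for_each_region(KernelLand::start_addr(), KernelLand::length(),
+    ///     |state, start, end| {
+    ///         if let PageState::Present(phys) = state {
+    ///             info!("{:#010x} - {:#010x} maps {:#010x}", start, end, phys);
+    ///         }
+    ///     });
+    /// ```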
#[allow(clippy::missing_docs_in_private_items)] - pub fn dump_kernelland_state(&mut self) { + pub fn for_each_region(&mut self, start: VirtualAddress, len: usize, mut f: F) + where + F: FnMut(PageState, VirtualAddress, VirtualAddress) + { #[derive(Debug, Clone, Copy)] - enum State { Present(VirtualAddress, PhysicalAddress), Guarded(VirtualAddress), Available(VirtualAddress) } + struct State(PageState, VirtualAddress); impl State { - fn get_vaddr(&self) -> VirtualAddress { - match *self { - State::Present(addr, _) => addr, - State::Guarded(addr) => addr, - State::Available(addr) => addr, - } - } - - fn update(&mut self, newstate: State) { + fn update(&mut self, f: &mut F, newstate: State) + where + F: FnMut(PageState, VirtualAddress, VirtualAddress) + { //let old_self = ::core::mem::replace(self, State::Present(VirtualAddress(0), PhysicalAddress(0))); let old_self = *self; let real_newstate = match (old_self, newstate) { // fuse guarded states - (State::Guarded(addr), State::Guarded(_)) => State::Guarded(addr), + (State(PageState::Guarded, addr), State(PageState::Guarded, _)) => State(PageState::Guarded, addr), // fuse available states - (State::Available(addr), State::Available(_)) => State::Available(addr), + (State(PageState::Available, addr), State(PageState::Available, _)) => State(PageState::Available, addr), // fuse present states only if physical frames are contiguous - (State::Present(addr, phys), State::Present(newaddr, newphys)) + (State(PageState::Present(phys), addr), State(PageState::Present(newphys), newaddr)) if newphys.addr().wrapping_sub(phys.addr()) == newaddr - addr - => State::Present(addr, phys), + => State(PageState::Present(phys), addr), // otherwise print the old mapping, and start a new one - (old, new) => { - old.print(new.get_vaddr() - 1); - new + (State(old, addr), State(new, new_addr)) => { + f(old, addr, new_addr - 1); + State(new, new_addr) } }; *self = real_newstate; } fn from(state: PageState, addr: VirtualAddress) -> State { - match state { - PageState::Present(table) => State::Present(addr, table), - PageState::Guarded => State::Guarded(addr), - PageState::Available => State::Available(addr) - } - } - - fn print(&self, end_addr: VirtualAddress) { - match *self { - State::Guarded(addr) => info!("{:#010x} - {:#010x} - GUARDED", addr, end_addr), - State::Available(addr) => info!("{:#010x} - {:#010x} - AVAILABLE", addr, end_addr), - State::Present(addr, phys) => info!("{:#010x} - {:#010x} - MAPS {:#010x} - {:#010x} ({} frames)", - addr, end_addr, phys, (phys + (end_addr - addr)), ((end_addr + 1) - addr) / PAGE_SIZE), - }; + State(state, addr) } } - let mut address: VirtualAddress = KernelLand::start_addr(); + let mut address: VirtualAddress = start; let mut state = None; - self.tables.for_every_entry(KernelLand::start_addr(), KernelLand::length(), |entry, length| { + self.tables.for_every_entry(address, len, |entry, length| { match state { // the first run None => { state = Some(State::from(entry, address)) }, // all others - Some(ref mut state) => state.update(State::from(entry, address)) + Some(ref mut state) => state.update(&mut f, State::from(entry, address)) } - address += length; + address = VirtualAddress(address.addr().wrapping_add(length)); }); // print the last state - match state { - Some(state) => state.print(RecursiveTablesLand::start_addr() - 1), - None => info!("Tables are empty") + if let Some(State(state, addr)) = state { + f(state, addr, start.wrapping_add(len).wrapping_sub(1)); } } + + /// Prints the state of the KernelLand by parsing the 
page tables. Used for + /// debugging purposes. + pub fn dump_kernelland_state(&mut self) { + self.for_each_region(KernelLand::start_addr(), KernelLand::end_addr() - KernelLand::start_addr() + 1, |state, start_addr, end_addr| { + match state { + PageState::Guarded => info!("{:#010x} - {:#010x} - GUARDED", start_addr, end_addr), + PageState::Available => info!("{:#010x} - {:#010x} - AVAILABLE", start_addr, end_addr), + PageState::Present(phys) => info!("{:#010x} - {:#010x} - MAPS {:#010x} - {:#010x} ({} frames)", + start_addr, end_addr, phys, (phys + (end_addr - start_addr)), (end_addr - start_addr + 1) / PAGE_SIZE), + } + }); + } } From 0fc61479c28e4af42a64116db1c0c1876a44d6f8 Mon Sep 17 00:00:00 2001 From: roblabla Date: Wed, 20 Feb 2019 18:53:54 +0000 Subject: [PATCH 14/15] Kernel: Atomic frame allocator Make the frame allocator atomic. This allows allocating frames from multiple CPU cores without having to go through a global lock. --- kernel/src/arch/i386/gdt.rs | 2 +- kernel/src/arch/i386/stack.rs | 2 +- kernel/src/frame_allocator/i386.rs | 711 ----------------- kernel/src/frame_allocator/mod.rs | 737 +++++++++++++++++- .../frame_allocator/physical_mem_region.rs | 8 +- kernel/src/heap_allocator.rs | 2 +- kernel/src/main.rs | 2 +- kernel/src/mem.rs | 6 + kernel/src/paging/arch/i386/table.rs | 2 +- kernel/src/paging/mapping.rs | 2 +- kernel/src/paging/process_memory.rs | 2 +- kernel/src/syscalls.rs | 2 +- kernel/src/utils.rs | 277 ++++++- 13 files changed, 1001 insertions(+), 754 deletions(-) delete mode 100644 kernel/src/frame_allocator/i386.rs diff --git a/kernel/src/arch/i386/gdt.rs b/kernel/src/arch/i386/gdt.rs index caf476a82..fd0b16429 100644 --- a/kernel/src/arch/i386/gdt.rs +++ b/kernel/src/arch/i386/gdt.rs @@ -19,7 +19,7 @@ use crate::arch::i386::instructions::segmentation::*; use crate::paging::PAGE_SIZE; use crate::paging::{MappingAccessRights, kernel_memory::get_kernel_memory}; -use crate::frame_allocator::{FrameAllocator, FrameAllocatorTrait}; +use crate::frame_allocator::FrameAllocator; use crate::mem::VirtualAddress; use alloc::vec::Vec; use crate::utils::align_up; diff --git a/kernel/src/arch/i386/stack.rs b/kernel/src/arch/i386/stack.rs index 6a18cb7a9..a0b9232b3 100644 --- a/kernel/src/arch/i386/stack.rs +++ b/kernel/src/arch/i386/stack.rs @@ -29,7 +29,7 @@ use ::core::mem::size_of; use crate::paging::lands::{VirtualSpaceLand, UserLand, KernelLand}; use crate::paging::{PAGE_SIZE, process_memory::QueryMemory, MappingAccessRights, PageState, kernel_memory::get_kernel_memory}; -use crate::frame_allocator::{FrameAllocator, FrameAllocatorTrait}; +use crate::frame_allocator::FrameAllocator; use crate::mem::VirtualAddress; use crate::error::KernelError; use xmas_elf::ElfFile; diff --git a/kernel/src/frame_allocator/i386.rs b/kernel/src/frame_allocator/i386.rs deleted file mode 100644 index a630bcf3c..000000000 --- a/kernel/src/frame_allocator/i386.rs +++ /dev/null @@ -1,711 +0,0 @@ -//! i386 implementation of the frame allocator. -//! -//! It keeps tracks of the allocated frames by mean of a giant bitmap mapping every -//! physical memory frame in the address space to a bit representing if it is free or not. -//! This works because the address space in 32 bits is only 4GB, so ~1 million frames only -//! -//! During init we initialize the bitmap by parsing the information that the bootloader gives us and -//! marking some physical memory regions as reserved, either because of BIOS or MMIO. -//! -//! 
We also reserve everything that is mapped in KernelLand, assuming the bootstrap mapped it there -//! for us, and we don't want to overwrite it. -//! -//! We do not distinguish between reserved and occupied frames. - -use super::{PhysicalMemRegion, FrameAllocatorTrait, FrameAllocatorTraitPrivate}; - -use crate::paging::PAGE_SIZE; -use crate::sync::SpinLock; -use alloc::vec::Vec; -use crate::utils::{check_aligned, check_nonzero_length}; -use bit_field::BitArray; -use crate::utils::BitArrayExt; -use crate::mem::PhysicalAddress; -use crate::mem::{round_to_page, round_to_page_upper}; -use crate::paging::kernel_memory::get_kernel_memory; -use crate::error::KernelError; -use failure::Backtrace; - -/// The offset part in a [PhysicalAddress]. -/// ``` -/// let phys_address = PhysicalAddress(0xccccc567); -/// -/// let offset_in_frame = phys_address & FRAME_OFFSET_MASK; -/// assert_eq!(offset_in_frame, 0x567); -/// ``` -const FRAME_OFFSET_MASK: usize = 0xFFF; -/// The frame part in [PhysicalAddress]. -/// ``` -/// let phys_address = PhysicalAddress(0xccccc567); -/// -/// let frame_addr = phys_address & FRAME_BASE_MASK; -/// assert_eq!(offset_in_frame, 0xccccc000); -/// ``` -const FRAME_BASE_MASK: usize = !FRAME_OFFSET_MASK; -/// The right shift to perform to a Physical address to get its frame id. -/// ``` -/// let phys_address = PhysicalAddress(0xabcde567); -/// -/// let frame_id = phys_address >> FRAME_BASE_LOG; -/// assert_eq!(frame_id, 0xabcde); -/// ``` -const FRAME_BASE_LOG: usize = 12; - -/// The size of the frames_bitmap (~128ko) -#[cfg(not(test))] -const FRAMES_BITMAP_SIZE: usize = usize::max_value() / PAGE_SIZE / 8 + 1; - -/// For unit tests we use a much smaller array. -#[cfg(test)] -const FRAMES_BITMAP_SIZE: usize = 32 / 8; - -/// Gets the frame number from a physical address -#[inline] -fn addr_to_frame(addr: usize) -> usize { - addr >> FRAME_BASE_LOG -} - -/// Gets the physical address from a frame number -#[inline] -fn frame_to_addr(frame: usize) -> usize { - frame << FRAME_BASE_LOG -} - -/// A frame allocator backed up by a giant bitmap. -pub struct FrameAllocatori386 { - /// A big bitmap denoting for every frame if it is free or not - /// - /// 1 is free, 0 is already allocated/reserved - /// This may seem backward, but this way when we start the array is filled with 0(reserved) - /// and it can be put in the bss by the compiler - memory_bitmap: [u8; FRAMES_BITMAP_SIZE], - - /// All operations have to check that the Allocator has been initialized - initialized: bool -} - -/// In the the bitmap, 1 means the frame is free. -const FRAME_FREE: bool = true; -/// In the the bitmap, 0 means the frame is occupied. -const FRAME_OCCUPIED: bool = false; - -/// A physical memory manger to allocate and free memory frames -// When running tests, each thread has its own view of the `FRAME_ALLOCATOR`. -#[cfg_attr(test, thread_local)] -static FRAME_ALLOCATOR : SpinLock = SpinLock::new(FrameAllocatori386::new()); - -impl FrameAllocatori386 { - /// Called to initialize the [FRAME_ALLOCATOR] global. - pub const fn new() -> Self { - FrameAllocatori386 { - // 0 is allocated/reserved - memory_bitmap: [0x00; FRAMES_BITMAP_SIZE], - initialized: false - } - } -} - -/// The physical memory manager. -/// -/// Serves physical memory in atomic blocks of size [PAGE_SIZE](crate::paging::PAGE_SIZE), called frames. -/// -/// An allocation request returns a [PhysicalMemRegion], which represents a list of -/// physically adjacent frames. 
When this returned `PhysicalMemRegion` is eventually dropped -/// the frames are automatically freed and can be re-served by the FrameAllocator. -#[derive(Debug)] -pub struct FrameAllocator; - -impl FrameAllocatorTraitPrivate for FrameAllocator { - /// Frees an allocated physical region. - /// - /// # Panic - /// - /// * Panics if the frame was not allocated. - /// * Panics if FRAME_ALLOCATOR was not initialized. - fn free_region(region: &PhysicalMemRegion) { - // don't bother taking the lock if there is no frames to free - if region.frames > 0 { - debug!("Freeing {:?}", region); - assert!(Self::check_is_allocated(region.address(), region.size()), "PhysMemRegion beeing freed was not allocated"); - let mut allocator = FRAME_ALLOCATOR.lock(); - assert!(allocator.initialized, "The frame allocator was not initialized"); - allocator.memory_bitmap.set_bits_area( - addr_to_frame(region.address().addr()) - .. - addr_to_frame(region.address().addr() + region.size()), - FRAME_FREE); - } - } - - /// Checks that a physical region is marked allocated. - /// - /// Rounds address and length. - /// - /// # Panic - /// - /// * Panics if FRAME_ALLOCATOR was not initialized. - fn check_is_allocated(address: PhysicalAddress, length: usize) -> bool { - let allocator = FRAME_ALLOCATOR.lock(); - assert!(allocator.initialized, "The frame allocator was not initialized"); - (address.floor()..(address + length).ceil()).step_by(PAGE_SIZE).all(|frame| { - let frame_index = addr_to_frame(frame.addr()); - allocator.memory_bitmap.get_bit(frame_index) == FRAME_OCCUPIED - }) - } - - /// Checks that a physical region is marked reserved. - /// This implementation does not distinguish between allocated and reserved frames, - /// so for us it's equivalent to `check_is_allocated`. - /// - /// Rounds address and length. - /// - /// # Panic - /// - /// * Panics if FRAME_ALLOCATOR was not initialized. - fn check_is_reserved(address: PhysicalAddress, length: usize) -> bool { - // we have no way to distinguish between 'allocated' and 'reserved' - Self::check_is_allocated(address, length) - } -} - -impl FrameAllocatorTrait for FrameAllocator { - /// Allocates a single [PhysicalMemRegion]. - /// Frames are physically consecutive. - /// - /// # Error - /// - /// * Error if `length` == 0. - /// * Error if `length` is not a multiple of [PAGE_SIZE]. - /// - /// # Panic - /// - /// * Panics if FRAME_ALLOCATOR was not initialized. 
- #[allow(clippy::match_bool)] - fn allocate_region(length: usize) -> Result { - check_nonzero_length(length)?; - check_aligned(length, PAGE_SIZE)?; - let nr_frames = length / PAGE_SIZE; - let mut allocator = FRAME_ALLOCATOR.lock(); - assert!(allocator.initialized, "The frame allocator was not initialized"); - - let mut start_index = 0usize; - while start_index + nr_frames <= allocator.memory_bitmap.bit_length() { - let mut temp_len = 0usize; - loop { - match allocator.memory_bitmap.get_bit(start_index + temp_len) { - FRAME_OCCUPIED => { - // hole wasn't big enough, jump to its end - start_index += temp_len + 1; - break; - } - FRAME_FREE => { - // hole is good til now, keep considering it - temp_len += 1; - if temp_len == nr_frames { - // the hole was big enough, allocate all of its frames, and return it - allocator.memory_bitmap.set_bits_area(start_index..start_index+temp_len, FRAME_OCCUPIED); - let allocated = PhysicalMemRegion { - start_addr: frame_to_addr(start_index), - frames: nr_frames, - should_free_on_drop: true - }; - debug!("Allocated physical region: {:?}", allocated); - return Ok(allocated); - } - } - } - } - } - info!("Failed physical allocation for {} consecutive frames", nr_frames); - Err(KernelError::PhysicalMemoryExhaustion { backtrace: Backtrace::new() }) - } - - /// Allocates physical frames, possibly fragmented across several physical regions. - /// - /// # Error - /// - /// * Error if `length` == 0. - /// * Error if `length` is not a multiple of [PAGE_SIZE]. - /// - /// # Panic - /// - /// * Panics if FRAME_ALLOCATOR was not initialized. - fn allocate_frames_fragmented(length: usize) -> Result, KernelError> { - check_nonzero_length(length)?; - check_aligned(length, PAGE_SIZE)?; - let requested = length / PAGE_SIZE; - - let mut allocator_lock = FRAME_ALLOCATOR.lock(); - assert!(allocator_lock.initialized, "The frame allocator was not initialized"); - - let mut collected_frames = 0; - let mut collected_regions = Vec::new(); - let mut current_hole = PhysicalMemRegion { start_addr: 0, frames: 0, should_free_on_drop: true }; - // while requested is still obtainable. - while addr_to_frame(current_hole.start_addr) + (requested - collected_frames) <= allocator_lock.memory_bitmap.bit_length() { - while current_hole.frames < requested - collected_frames { - // compute current hole's size - let considered_frame = addr_to_frame(current_hole.start_addr) + current_hole.frames; - if allocator_lock.memory_bitmap.get_bit(considered_frame) == FRAME_FREE { - // expand current hole - allocator_lock.memory_bitmap.set_bit(considered_frame, FRAME_OCCUPIED); - current_hole.frames += 1; - } else { - // we reached current hole's end - break; - } - } - - // make a copy, we're about to move the PhysMemRegion to the vec. - let cur_hole_addr = current_hole.start_addr; - let cur_hole_frames = current_hole.frames; - - if current_hole.frames > 0 { - // add it to our collected regions - - // dropping the lock here, in case pushing this region in the collected regions - // causes a heap expansion. This is ok, since we marked considered frames as allocated, - // we're in a stable state. This ensures heap expansion won't take one of those. - drop(allocator_lock); - collected_frames += current_hole.frames; - collected_regions.push(current_hole); - if collected_frames == requested { - // we collected enough frames ! Succeed - debug!("Allocated physical regions: {:?}", collected_regions); - return Ok(collected_regions) - } - // re-take the lock. 
Still in a stable state, if heap-expansion - // happened frames were marked allocated, and won't be given by this allocation - allocator_lock = FRAME_ALLOCATOR.lock(); - } - // advance the cursor - current_hole = PhysicalMemRegion { - start_addr: match cur_hole_addr.checked_add((cur_hole_frames + 1) * PAGE_SIZE) { - Some(sum_addr) => sum_addr, - None => break - // if it was the last frame, and the last to be considered: - // - it was free, and we already returned Ok. - // - it was occupied, we arrived here, and the add would overflow. We break and return PhysicalMemoryExhaustion. - }, - frames: 0, - should_free_on_drop: true - }; - } - drop(allocator_lock); - info!("Failed physical allocation for {} non consecutive frames", requested); - // collected_regions is dropped, marking them free again - Err(KernelError::PhysicalMemoryExhaustion { backtrace: Backtrace::new() }) - } -} - -/// Initialize the [FrameAllocator] by parsing the multiboot information -/// and marking some memory areas as unusable -#[cfg(not(test))] -pub fn init() { - let boot_info = crate::arch::i386::multiboot::get_boot_information(); - let mut allocator = FRAME_ALLOCATOR.lock(); - - let memory_map_tag = boot_info.memory_map_tag() - .expect("GRUB, you're drunk. Give us our memory_map_tag."); - for memarea in memory_map_tag.memory_areas() { - if memarea.start_address() > u64::from(u32::max_value()) || memarea.end_address() > u64::from(u32::max_value()) { - continue; - } - mark_area_free(&mut allocator.memory_bitmap, - memarea.start_address() as usize, - memarea.end_address() as usize); - } - - // Reserve everything mapped in KernelLand - drop(allocator); // prevent deadlock - get_kernel_memory().reserve_kernel_land_frames(); - let mut allocator = FRAME_ALLOCATOR.lock(); // retake the mutex - - // Don't free the modules. We need to keep the kernel around so we get symbols in panics! - for module in boot_info.module_tags() { - mark_area_reserved(&mut allocator.memory_bitmap, - module.start_address() as usize, module.end_address() as usize); - } - - // Reserve the very first frame for null pointers when paging is off - mark_area_reserved(&mut allocator.memory_bitmap, - 0x00000000, - 0x00000001); - - if log_enabled!(::log::Level::Info) { - let mut cur = None; - for (i, bitmap) in allocator.memory_bitmap.iter().enumerate() { - for j in 0..8 { - let curaddr = (i * 8 + j) * crate::paging::PAGE_SIZE; - if bitmap & (1 << j) != 0 { - // Area is available - match cur { - None => cur = Some((FRAME_FREE, curaddr)), - Some((FRAME_OCCUPIED, last)) => { - info!("{:#010x} - {:#010x} OCCUPIED", last, curaddr); - cur = Some((FRAME_FREE, curaddr)); - }, - _ => () - } - } else { - // Area is occupied - match cur { - None => cur = Some((FRAME_OCCUPIED, curaddr)), - Some((FRAME_FREE, last)) => { - info!("{:#010x} - {:#010x} AVAILABLE", last, curaddr); - cur = Some((FRAME_OCCUPIED, curaddr)); - }, - _ => () - } - } - } - } - match cur { - Some((FRAME_FREE, last)) => info!("{:#010x} - {:#010x} AVAILABLE", last, 0xFFFFFFFFu32), - Some((FRAME_OCCUPIED, last)) => info!("{:#010x} - {:#010x} OCCUPIED", last, 0xFFFFFFFFu32), - _ => () - } - } - allocator.initialized = true -} - -#[cfg(test)] -pub use self::test::init; - -/// Marks a physical memory area as reserved and will never give it when requesting a frame. 
-/// This is used to mark where memory holes are, or where the kernel was mapped -/// -/// # Panic -/// -/// Does not panic if it overwrites an existing reservation -fn mark_area_reserved(bitmap: &mut [u8], - start_addr: usize, - end_addr: usize) { - info!("Setting {:#010x}..{:#010x} to reserved", round_to_page(start_addr), round_to_page_upper(end_addr)); - bitmap.set_bits_area( - addr_to_frame(round_to_page(start_addr)) - .. - addr_to_frame(round_to_page_upper(end_addr)), - FRAME_OCCUPIED); -} - -/// Marks a physical memory area as free for frame allocation -/// -/// # Panic -/// -/// Does not panic if it overwrites an existing reservation -fn mark_area_free(bitmap: &mut [u8], - start_addr: usize, - end_addr: usize) { - info!("Setting {:#010x}..{:#010x} to available", round_to_page(start_addr), round_to_page_upper(end_addr)); - bitmap.set_bits_area( - addr_to_frame(round_to_page_upper(start_addr)) - .. - addr_to_frame(round_to_page(end_addr)), - FRAME_FREE); -} - -/// Marks a physical memory frame as already allocated -/// Currently used during init when paging marks KernelLand frames as alloc'ed by bootstrap -/// -/// # Panic -/// -/// Panics if it overwrites an existing reservation -pub fn mark_frame_bootstrap_allocated(addr: PhysicalAddress) { - debug!("Setting {:#010x} to boostrap allocked", addr.addr()); - assert_eq!(addr.addr() & FRAME_OFFSET_MASK, 0x000); - let bit = addr_to_frame(addr.addr()); - let mut allocator = FRAME_ALLOCATOR.lock(); - if allocator.memory_bitmap.get_bit(bit) != FRAME_FREE { - panic!("Frame being marked reserved was already allocated"); - } - allocator.memory_bitmap.set_bit(bit, FRAME_OCCUPIED); -} - -#[cfg(test)] -mod test { - use super::*; - - const ALL_MEMORY: usize = FRAMES_BITMAP_SIZE * 8 * PAGE_SIZE; - - /// Initializes the `FrameAllocator` for testing. - /// - /// Every test that makes use of the `FrameAllocator` must call this function, - /// and drop its return value when it is finished. - pub fn init() -> FrameAllocatorInitialized { - let mut allocator = FRAME_ALLOCATOR.lock(); - assert_eq!(allocator.initialized, false, "frame_allocator::init() was called twice"); - - // make it all available - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - - // reserve one frame, in the middle, just for fun - mark_area_reserved(&mut allocator.memory_bitmap, PAGE_SIZE * 3, PAGE_SIZE * 3 + 1); - - allocator.initialized = true; - - FrameAllocatorInitialized(()) - } - - /// Because tests are run in the same binary, a test might forget to re-initialize the frame allocator, - /// which will cause it to run on the previous test's frame allocator state. - /// - /// We prevent that by returning a special structure that every test must keep in its scope. - /// When the test finishes, it is dropped, and it automatically marks the frame allocator uninitialized again. - #[must_use] - pub struct FrameAllocatorInitialized(()); - - impl ::core::ops::Drop for FrameAllocatorInitialized { - fn drop(&mut self) { FRAME_ALLOCATOR.lock().initialized = false; } - } - - /// The way you usually use it. 
- #[test] - fn ok() { - let _f = crate::frame_allocator::init(); - - let a = FrameAllocator::allocate_frame().unwrap(); - let b = FrameAllocator::allocate_region(2 * PAGE_SIZE).unwrap(); - let c_vec = FrameAllocator::allocate_frames_fragmented(3 * PAGE_SIZE).unwrap(); - - drop(a); - drop(b); - drop(c_vec); - } - - - #[test] - fn fragmented() { - let _f = crate::frame_allocator::init(); - // make it all available - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - - // reserve some frames in the middle - mark_area_reserved(&mut allocator.memory_bitmap, 2 * PAGE_SIZE, 7 * PAGE_SIZE); - drop(allocator); - - // force a fragmented allocation - let frames = FrameAllocator::allocate_frames_fragmented(5 * PAGE_SIZE).unwrap(); - - assert_eq!(frames.len(), 2); - assert_eq!(frames[0].address(), PhysicalAddress(0x00000000)); - assert_eq!(frames[0].size(), 2 * PAGE_SIZE); - assert_eq!(frames[1].address(), PhysicalAddress(7 * PAGE_SIZE)); - assert_eq!(frames[1].size(), 3 * PAGE_SIZE); - } - - /// You can't give it a size of 0. - #[test] - fn zero() { - let _f = crate::frame_allocator::init(); - FrameAllocator::allocate_region(0).unwrap_err(); - FrameAllocator::allocate_frames_fragmented(0).unwrap_err(); - } - - #[test] #[should_panic] fn no_init_frame() { let _ = FrameAllocator::allocate_frame(); } - #[test] #[should_panic] fn no_init_region() { let _ = FrameAllocator::allocate_region(PAGE_SIZE); } - #[test] #[should_panic] fn no_init_fragmented() { let _ = FrameAllocator::allocate_frames_fragmented(PAGE_SIZE); } - - /// Allocation fails if Out Of Memory. - #[test] - fn physical_oom_frame() { - let _f = crate::frame_allocator::init(); - // make it all reserved - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - drop(allocator); - - match FrameAllocator::allocate_frame() { - Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - } - - #[test] - fn physical_oom_frame_threshold() { - let _f = crate::frame_allocator::init(); - // make it all reserved - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - // leave only the last frame - mark_area_free(&mut allocator.memory_bitmap, ALL_MEMORY - PAGE_SIZE, ALL_MEMORY); - drop(allocator); - - FrameAllocator::allocate_frame().unwrap(); - } - - #[test] - fn physical_oom_region() { - let _f = crate::frame_allocator::init(); - // make it all reserved - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - // leave only the last 3 frames - mark_area_free(&mut allocator.memory_bitmap, - ALL_MEMORY - 3 * PAGE_SIZE, - ALL_MEMORY); - drop(allocator); - - match FrameAllocator::allocate_region(4 * PAGE_SIZE) { - Err(KernelError::PhysicalMemoryExhaustion { .. 
}) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - } - - #[test] - fn physical_oom_region_threshold() { - let _f = crate::frame_allocator::init(); - // make it all reserved - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - // leave only the last 3 frames - mark_area_free(&mut allocator.memory_bitmap, - ALL_MEMORY - 3 * PAGE_SIZE, - ALL_MEMORY); - drop(allocator); - - FrameAllocator::allocate_region(3 * PAGE_SIZE).unwrap(); - } - - #[test] - fn physical_oom_fragmented() { - let _f = crate::frame_allocator::init(); - // make it all available - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - drop(allocator); - - match FrameAllocator::allocate_frames_fragmented(ALL_MEMORY + PAGE_SIZE) { - Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - } - - #[test] - fn physical_oom_threshold_fragmented() { - let _f = crate::frame_allocator::init(); - // make it all available - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - drop(allocator); - - FrameAllocator::allocate_frames_fragmented(ALL_MEMORY).unwrap(); - } - - #[test] - fn allocate_last_frame() { - let _f = crate::frame_allocator::init(); - // make it all available - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - - // reserve all but last frame - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY - PAGE_SIZE); - drop(allocator); - - // check with allocate_frame - let frame = FrameAllocator::allocate_frame().unwrap(); - drop(frame); - - // check with allocate_region - let frame = FrameAllocator::allocate_region(PAGE_SIZE).unwrap(); - drop(frame); - - // check with allocate_frames_fragmented - let frame = FrameAllocator::allocate_frames_fragmented(PAGE_SIZE).unwrap(); - drop(frame); - - // check we had really allocated *all* of it - let frame = FrameAllocator::allocate_frame().unwrap(); - match FrameAllocator::allocate_frame() { - Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - }; - drop(frame); - } - - #[test] - fn oom_hard() { - let _f = crate::frame_allocator::init(); - // make it all reserved - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_reserved(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - - // free only 1 frame in the middle - mark_area_free(&mut allocator.memory_bitmap, 2 * PAGE_SIZE, 3 * PAGE_SIZE); - drop(allocator); - - // check with allocate_region - match FrameAllocator::allocate_region(2 * PAGE_SIZE) { - Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - - // check with allocate_frame_fragmented - match FrameAllocator::allocate_frames_fragmented(2 * PAGE_SIZE) { - Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - - // check we can still take only one frame - let frame = FrameAllocator::allocate_frame().unwrap(); - match FrameAllocator::allocate_frame() { - Err(KernelError::PhysicalMemoryExhaustion { .. 
}) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - } - drop(frame); - } - - /// This test checks the considered frames marked allocated by [allocate_frame_fragmented] - /// are marked free again when the function fails. - /// - /// The function has a an optimisation checking at every point if the requested length is - /// still obtainable, otherwise it want even bother marking the frames and fail directly. - /// - /// But we **do** want to mark the frames allocated, so our check has too be smart and work - /// around this optimization. - /// - /// We do this by allocating the end of the bitmap, so [allocate_frame_fragmented] will - /// realize it's going to fail only by the time it's half way through, - /// and some frames will have been marked allocated. - #[test] - fn physical_oom_doesnt_leak() { - let _f = crate::frame_allocator::init(); - // make it all available - let mut allocator = FRAME_ALLOCATOR.lock(); - mark_area_free(&mut allocator.memory_bitmap, 0, ALL_MEMORY); - drop(allocator); - - // allocate it all - let half_left = FrameAllocator::allocate_region(ALL_MEMORY / 2).unwrap(); - let half_right = FrameAllocator::allocate_region(ALL_MEMORY / 2).unwrap(); - - // check we have really allocated *all* of it - match FrameAllocator::allocate_frame() { - Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - }; - - // free only the left half - drop(half_left); - - // attempt to allocate more than the available half - match FrameAllocator::allocate_frames_fragmented(ALL_MEMORY / 2 + PAGE_SIZE) { - Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - }; - - // we should be able to still allocate after an oom recovery. - let half_left = FrameAllocator::allocate_frames_fragmented( ALL_MEMORY / 2).unwrap(); - - // and now memory is fully allocated again - match FrameAllocator::allocate_frame() { - Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), - unexpected_err => panic!("test failed: {:#?}", unexpected_err) - }; - - drop(half_left); - drop(half_right); - } -} diff --git a/kernel/src/frame_allocator/mod.rs b/kernel/src/frame_allocator/mod.rs index 3faaaf7e5..da1e68ebf 100644 --- a/kernel/src/frame_allocator/mod.rs +++ b/kernel/src/frame_allocator/mod.rs @@ -1,57 +1,734 @@ //! Physical memory manager. //! //! This module can only allocate and free whole frames. +//! +//! It keeps track of allocated frames by means of a giant bitmap mapping every +//! physical memory frame in the address space to a bit representing whether it is free. +//! This works because a 32-bit address space is only 4GiB, i.e. only about one million frames. +//! +//! During init we initialize the bitmap by parsing the information that the bootloader gives us and +//! marking some physical memory regions as reserved, either because of BIOS or MMIO. +//! +//! We also reserve everything that is mapped in KernelLand, assuming the bootstrap mapped it there +//! for us, and we don't want to overwrite it. +//! +//! We do not distinguish between reserved and occupied frames.
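+//!
+//! # Example
+//!
+//! A minimal sketch of the frame arithmetic the bitmap is built on (illustrative; mirrors the constants defined below):
+//!
+//! ```
+//! let phys_addr: usize = 0xccccc567;
+//! let frame_id = phys_addr >> 12;          // FRAME_BASE_LOG: bit index in the bitmap
+//! assert_eq!(frame_id, 0xccccc);
+//! assert_eq!(frame_id << 12, 0xccccc000);  // base address of the frame
+//! // A 4GiB address space is ~1M frames, so the bitmap needs ~1M bits (~128KiB).
+//! assert_eq!(u32::max_value() / 4096 + 1, 1 << 20);
+//! ```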
use alloc::vec::Vec; use crate::error::KernelError; use crate::paging::PAGE_SIZE; +use crate::utils::{check_aligned, check_nonzero_length}; +use crate::utils::AtomicBitmap; +use crate::mem::PhysicalAddress; +use crate::mem::{round_to_page, round_to_page_upper}; +use crate::paging::kernel_memory::get_kernel_memory; +use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering}; +use failure::Backtrace; pub mod physical_mem_region; pub use self::physical_mem_region::{PhysicalMemRegion, PhysicalMemRegionIter}; -/// Architecture specific-behaviour -mod i386; -pub use self::i386::{FrameAllocator, init, mark_frame_bootstrap_allocated}; +/// The offset part in a [PhysicalAddress]. +/// ``` +/// let phys_address = PhysicalAddress(0xccccc567); +/// +/// let offset_in_frame = phys_address & FRAME_OFFSET_MASK; +/// assert_eq!(offset_in_frame, 0x567); +/// ``` +const FRAME_OFFSET_MASK: usize = 0xFFF; +/// The frame part in [PhysicalAddress]. +/// ``` +/// let phys_address = PhysicalAddress(0xccccc567); +/// +/// let frame_addr = phys_address & FRAME_BASE_MASK; +/// assert_eq!(frame_addr, 0xccccc000); +/// ``` +const FRAME_BASE_MASK: usize = !FRAME_OFFSET_MASK; +/// The right shift to apply to a physical address to get its frame id. +/// ``` +/// let phys_address = PhysicalAddress(0xabcde567); +/// +/// let frame_id = phys_address >> FRAME_BASE_LOG; +/// assert_eq!(frame_id, 0xabcde); +/// ``` +const FRAME_BASE_LOG: usize = 12; + +/// The size of the frames_bitmap in bits (~1M bits, i.e. ~128KiB of storage) +#[cfg(not(test))] +const FRAMES_BITMAP_BITSIZE: usize = usize::max_value() / PAGE_SIZE - 1; + +/// For unit tests we use a much smaller array. +#[cfg(test)] +const FRAMES_BITMAP_BITSIZE: usize = 32; + +/// The size of the frames_bitmap in number of atomic elements. +const FRAMES_BITMAP_ARRSIZE: usize = FRAMES_BITMAP_BITSIZE / (core::mem::size_of::<usize>() * 8); + +/// Gets the frame number from a physical address +#[inline] +fn addr_to_frame(addr: usize) -> usize { + addr >> FRAME_BASE_LOG +} + +/// Gets the physical address from a frame number +#[inline] +fn frame_to_addr(frame: usize) -> usize { + frame << FRAME_BASE_LOG +} + + +/// The physical memory manager. +/// +/// Serves physical memory in atomic blocks of size [PAGE_SIZE](crate::paging::PAGE_SIZE), called frames. +/// +/// An allocation request returns a [PhysicalMemRegion], which represents a list of +/// physically adjacent frames. When this returned `PhysicalMemRegion` is eventually dropped +/// the frames are automatically freed and can be re-served by the FrameAllocator. +/// +/// Up to 32 physically contiguous frames may be allocated at a time. +pub struct InternalFrameAllocator { + /// A big bitmap denoting for every frame whether it is free. + /// + /// 1 is free, 0 is already allocated/reserved. + /// This may seem backward, but this way the array starts out filled with 0 (reserved), + /// and can be put in the .bss by the compiler + memory_bitmap: [AtomicUsize; FRAMES_BITMAP_ARRSIZE], + + /// All operations have to check that the Allocator has been initialized + initialized: AtomicBool +} + +/// In the bitmap, 1 means the frame is free. +const FRAME_FREE: bool = true; +/// In the bitmap, 0 means the frame is occupied. +const FRAME_OCCUPIED: bool = false; + +/// A physical memory manager to allocate and free memory frames. // When running tests, each thread has its own view of the `FRAME_ALLOCATOR`.
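+// Design note (inferred from this diff, not stated by the author): the previous allocator
+// kept its `[u8]` bitmap behind a global SpinLock, whereas this one is a plain `static`
+// whose bits are flipped through the atomic operations of `utils::AtomicBitmap`, so
+// allocations no longer serialize on a single lock.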
+static FRAME_ALLOCATOR : InternalFrameAllocator = InternalFrameAllocator::new(); + +impl InternalFrameAllocator { + /// Called to initialize the [FRAME_ALLOCATOR] global. + pub const fn new() -> Self { + // Dumb workaround to initialize a huge array of AtomicUsize in const fn context. + #[doc(hidden)] + union ZeroedBuilder { + atomic: [AtomicUsize; FRAMES_BITMAP_ARRSIZE], + nonatomic: [usize; FRAMES_BITMAP_ARRSIZE], + } + + #[doc(hidden)] + const unsafe fn zeroed() -> [AtomicUsize; FRAMES_BITMAP_ARRSIZE] { + ZeroedBuilder { + nonatomic: [0; FRAMES_BITMAP_ARRSIZE] + }.atomic + } + + InternalFrameAllocator { + // 0 is allocated/reserved. This is terrible and I feel bad. + memory_bitmap: unsafe { zeroed() }, + initialized: AtomicBool::new(false), + } + } +} + +impl InternalFrameAllocator { + /// Frees an allocated physical region. + /// + /// # Panic + /// + /// * Panics if the frame was not allocated. + /// * Panics if FRAME_ALLOCATOR was not initialized. + pub fn free_region(&self, region: &PhysicalMemRegion) { + // Don't do anything for empty regions. Those can be temporarily created + // in allocate_frames_fragmented. + if region.frames != 0 { + debug!("Freeing {:?}", region); + assert!(self.check_is_allocated(region.address(), region.size()), "PhysMemRegion being freed was not allocated"); + assert!(self.initialized.load(Ordering::SeqCst), "The frame allocator was not initialized"); + self.memory_bitmap.store_bits_nonatomic( + addr_to_frame(region.address().addr()) + .. + addr_to_frame(region.address().addr() + region.size()), + FRAME_FREE); + } + } + + /// Checks that a physical region is marked allocated. + /// + /// Rounds address and length. + /// + /// # Panic + /// + /// * Panics if FRAME_ALLOCATOR was not initialized. + pub fn check_is_allocated(&self, address: PhysicalAddress, length: usize) -> bool { + assert!(self.initialized.load(Ordering::SeqCst), "The frame allocator was not initialized"); + (address.floor()..(address + length).ceil()).step_by(PAGE_SIZE).all(|frame| { + let frame_index = addr_to_frame(frame.addr()); + self.memory_bitmap.load_bit(frame_index, Ordering::SeqCst) == FRAME_OCCUPIED + }) + } + + /// Checks that a physical region is marked reserved. + /// This implementation does not distinguish between allocated and reserved frames, + /// so for us it's equivalent to `check_is_allocated`. + /// + /// Rounds address and length. + /// + /// # Panic + /// + /// * Panics if FRAME_ALLOCATOR was not initialized. + pub fn check_is_reserved(&self, address: PhysicalAddress, length: usize) -> bool { + // we have no way to distinguish between 'allocated' and 'reserved' + self.check_is_allocated(address, length) + } + + /// Prints the layout of the frame allocator. + pub fn print(&self) { + if log_enabled!(log::Level::Info) { + info!("{:#?}", self) + } + } -/// An arch-specific FrameAllocator must expose the following functions -pub trait FrameAllocatorTrait: FrameAllocatorTraitPrivate { - /// Allocates a single PhysicalMemRegion. + /// Allocates a single [PhysicalMemRegion]. /// Frames are physically consecutive. - fn allocate_region(length: usize) -> Result<PhysicalMemRegion, KernelError>; + /// + /// # Error + /// + /// * Error if `length` == 0. + /// * Error if `length` is not a multiple of [PAGE_SIZE]. + /// * Error if `length` is bigger than `size_of::<usize>() * 8 * PAGE_SIZE`. + /// + /// # Panic + /// + /// * Panics if FRAME_ALLOCATOR was not initialized.
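+    ///
+    /// # Example
+    ///
+    /// A sketch of the expected usage (illustrative, assuming an initialized allocator):
+    ///
+    /// ```
+    /// let region = FRAME_ALLOCATOR.allocate_region(2 * PAGE_SIZE).unwrap();
+    /// assert_eq!(region.size(), 2 * PAGE_SIZE);
+    /// // Dropping the region hands both frames back to the allocator.
+    /// drop(region);
+    /// ```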
+    #[allow(clippy::match_bool)] +    pub fn allocate_region(&self, length: usize) -> Result<PhysicalMemRegion, KernelError> { +        check_nonzero_length(length)?; +        check_aligned(length, PAGE_SIZE)?; +        let nr_frames = length / PAGE_SIZE; +        assert!(self.initialized.load(Ordering::SeqCst), "The frame allocator was not initialized"); + +        if let Some(start_index) = self.memory_bitmap.set_n_bits(nr_frames, FRAME_OCCUPIED) { +            let allocated = PhysicalMemRegion { +                start_addr: frame_to_addr(start_index), +                frames: nr_frames, +                should_free_on_drop: true +            }; +            debug!("Allocated physical region: {:?}", allocated); +            return Ok(allocated); +        } +        info!("Failed physical allocation for {} consecutive frames", nr_frames); +        Err(KernelError::PhysicalMemoryExhaustion { backtrace: Backtrace::new() }) +    }  /// Allocates physical frames, possibly fragmented across several physical regions. -    fn allocate_frames_fragmented(length: usize) -> Result<Vec<PhysicalMemRegion>, KernelError>; +    /// +    /// # Error +    /// +    /// * Error if `length` == 0. +    /// * Error if `length` is not a multiple of [PAGE_SIZE]. +    /// +    /// # Panic +    /// +    /// * Panics if FRAME_ALLOCATOR was not initialized. +    pub fn allocate_frames_fragmented(&self, length: usize) -> Result<Vec<PhysicalMemRegion>, KernelError> { +        check_nonzero_length(length)?; +        check_aligned(length, PAGE_SIZE)?; +        let requested = length / PAGE_SIZE; + +        assert!(self.initialized.load(Ordering::SeqCst), "The frame allocator was not initialized"); + +        let mut collected_frames = 0; +        let mut collected_regions = Vec::new(); +        let mut current_hole = PhysicalMemRegion { start_addr: 0, frames: 0, should_free_on_drop: true }; +        // while requested is still obtainable. +        while addr_to_frame(current_hole.start_addr) + (requested - collected_frames) <= self.memory_bitmap.bit_len() { +            while current_hole.frames < requested - collected_frames { +                // compute current hole's size +                let considered_frame = addr_to_frame(current_hole.start_addr) + current_hole.frames; +                if self.memory_bitmap.compare_and_swap(considered_frame, FRAME_FREE, FRAME_OCCUPIED, Ordering::SeqCst).is_ok() { +                    // expand current hole +                    current_hole.frames += 1; +                } else { +                    // we reached current hole's end +                    break; +                } +            } + +            // make a copy, we're about to move the PhysMemRegion to the vec. +            let cur_hole_addr = current_hole.start_addr; +            let cur_hole_frames = current_hole.frames; + +            if current_hole.frames > 0 { +                // add it to our collected regions + +                collected_frames += current_hole.frames; +                collected_regions.push(current_hole); +                if collected_frames == requested { +                    // we collected enough frames! Succeed. +                    info!("Allocated physical regions: {:?}", collected_regions); +                    return Ok(collected_regions) +                } +            } +            // advance the cursor +            current_hole = PhysicalMemRegion { +                start_addr: match cur_hole_addr.checked_add((cur_hole_frames + 1) * PAGE_SIZE) { +                    Some(sum_addr) => sum_addr, +                    None => break +                    // if it was the last frame, and the last to be considered: +                    // - it was free, and we already returned Ok. +                    // - it was occupied, we arrived here, and the add would overflow. We break and return PhysicalMemoryExhaustion. +                }, +                frames: 0, +                should_free_on_drop: true +            }; +        } +        info!("Failed physical allocation for {} non-consecutive frames", requested); +        // collected_regions is dropped, marking them free again +        Err(KernelError::PhysicalMemoryExhaustion { backtrace: Backtrace::new() }) +    } + +    /// Marks a physical memory area as reserved; it will never be given out when requesting a frame.
+    /// This is used to mark where memory holes are, or where the kernel was mapped. +    /// +    /// # Panic +    /// +    /// Does not panic if it overwrites an existing reservation +    pub fn mark_area_reserved(&self, +                          start_addr: usize, +                          end_addr: usize) { +        // TODO: Fix tests. +        //assert!(!self.initialized.load(Ordering::SeqCst), "The frame allocator was already initialized"); +        info!("Setting {:#010x}..{:#010x} to reserved", round_to_page(start_addr), round_to_page_upper(end_addr)); +        self.memory_bitmap.store_bits_nonatomic( +            addr_to_frame(round_to_page(start_addr)) +                .. +            addr_to_frame(round_to_page_upper(end_addr)), +            FRAME_OCCUPIED); +    } + +    /// Marks a physical memory area as free for frame allocation. +    /// +    /// # Panic +    /// +    /// Does not panic if it overwrites an existing reservation +    fn mark_area_free(&self, +                      start_addr: usize, +                      end_addr: usize) { +        //assert!(!self.initialized.load(Ordering::SeqCst), "The frame allocator was already initialized"); +        info!("Setting {:#010x}..{:#010x} to available", round_to_page_upper(start_addr), round_to_page(end_addr)); +        // Round inward: only frames that are entirely covered by the area are marked free. +        self.memory_bitmap.store_bits_nonatomic( +            addr_to_frame(round_to_page_upper(start_addr)) +                .. +            addr_to_frame(round_to_page(end_addr)), +            FRAME_FREE); +    } +} -    /// Allocates a single physical frame. -    fn allocate_frame() -> Result<PhysicalMemRegion, KernelError> { -        Self::allocate_region(PAGE_SIZE) +impl core::fmt::Debug for InternalFrameAllocator { +    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { +        let mut cur = None; +        let mut f = f.debug_list(); +        for (i, bit) in self.memory_bitmap.bit_iter().enumerate() { +            let curaddr = i * crate::paging::PAGE_SIZE; +            if bit == FRAME_FREE { +                // Area is available +                match cur { +                    None => cur = Some((FRAME_FREE, curaddr)), +                    Some((FRAME_OCCUPIED, last)) => { +                        f.entry(&format_args!("{:#010x} - {:#010x} OCCUPIED", last, curaddr)); +                        cur = Some((FRAME_FREE, curaddr)); +                    }, +                    _ => () +                } +            } else { +                // Area is occupied +                match cur { +                    None => cur = Some((FRAME_OCCUPIED, curaddr)), +                    Some((FRAME_FREE, last)) => { +                        f.entry(&format_args!("{:#010x} - {:#010x} AVAILABLE", last, curaddr)); +                        cur = Some((FRAME_OCCUPIED, curaddr)); +                    }, +                    _ => () +                } +            } +        } +        match cur { +            Some((FRAME_FREE, last)) => { f.entry(&format_args!("{:#010x} - {:#010x} AVAILABLE", last, 0xFFFFFFFFu32)); }, +            Some((FRAME_OCCUPIED, last)) => { f.entry(&format_args!("{:#010x} - {:#010x} OCCUPIED", last, 0xFFFFFFFFu32)); }, +            _ => () +        } +        f.finish() +    } } -use self::private::FrameAllocatorTraitPrivate; +/// Proxy to [InternalFrameAllocator]. Should be removed. +#[derive(Debug)] +pub struct FrameAllocator; -mod private { -    //! Private FrameAllocator API +impl FrameAllocator { +    /// See [InternalFrameAllocator::allocate_region]. +    pub fn allocate_region(length: usize) -> Result<PhysicalMemRegion, KernelError> { +        FRAME_ALLOCATOR.allocate_region(length) +    } + +    /// See [InternalFrameAllocator::allocate_frames_fragmented]. +    pub fn allocate_frames_fragmented(length: usize) -> Result<Vec<PhysicalMemRegion>, KernelError> { +        FRAME_ALLOCATOR.allocate_frames_fragmented(length) +    } + +    /// Allocates a single frame. See [InternalFrameAllocator::allocate_region]. +    pub fn allocate_frame() -> Result<PhysicalMemRegion, KernelError> { +        FRAME_ALLOCATOR.allocate_region(PAGE_SIZE) +    } + +    /// See [InternalFrameAllocator::free_region]. +    pub fn free_region(region: &PhysicalMemRegion) { +        FRAME_ALLOCATOR.free_region(region) +    } + +    /// See [InternalFrameAllocator::check_is_allocated].
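+    ///
+    /// For instance (illustrative, assuming an initialized allocator):
+    ///
+    /// ```
+    /// let region = FrameAllocator::allocate_region(PAGE_SIZE).unwrap();
+    /// let (addr, len) = (region.address(), region.size());
+    /// assert!(FrameAllocator::check_is_allocated(addr, len));
+    /// drop(region); // frees the frame
+    /// assert!(!FrameAllocator::check_is_allocated(addr, len));
+    /// ```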
+ pub fn check_is_allocated(address: PhysicalAddress, length: usize) -> bool { + FRAME_ALLOCATOR.check_is_allocated(address, length) + } + + /// See [InternalFrameAllocator::check_is_reserved]. + pub fn check_is_reserved(address: PhysicalAddress, length: usize) -> bool { + FRAME_ALLOCATOR.check_is_reserved(address, length) + } +} - use super::PhysicalMemRegion; - use crate::mem::PhysicalAddress; +/// Initialize the [FrameAllocator] by parsing the multiboot information +/// and marking some memory areas as unusable +#[cfg(not(test))] +pub fn init() { + let boot_info = crate::arch::i386::multiboot::get_boot_information(); + let allocator = &FRAME_ALLOCATOR; - /// An arch-specifig FrameAllocator must expose the following functions. + info!("Accessing bootinfo"); + let memory_map_tag = boot_info.memory_map_tag() + .expect("GRUB, you're drunk. Give us our memory_map_tag."); + + info!("Setting free memareas as free"); + for memarea in memory_map_tag.memory_areas() { + if memarea.start_address() > u64::from(u32::max_value()) || memarea.end_address() > u64::from(u32::max_value()) { + continue; + } + allocator.mark_area_free(memarea.start_address() as usize, + memarea.end_address() as usize); + } + + info!("Reserving everything mapped in KernelLand"); + // Reserve everything mapped in KernelLand + get_kernel_memory().reserve_kernel_land_frames(&allocator); + + info!("Reserving the modules"); + // Don't free the modules. We need to keep the kernel around so we get symbols in panics! + for module in boot_info.module_tags() { + allocator.mark_area_reserved(module.start_address() as usize, module.end_address() as usize); + } + + info!("Reserving the first page"); + // Reserve the very first frame for null pointers when paging is off + allocator.mark_area_reserved(0x00000000, + 0x00000001); + + allocator.print(); + + allocator.initialized.store(true, Ordering::SeqCst); +} + +#[cfg(test)] +pub use self::test::init; + +#[cfg(test)] +mod test { + use super::*; + + const ALL_MEMORY: usize = FRAMES_BITMAP_BITSIZE * PAGE_SIZE; + + /// Initializes the `FrameAllocator` for testing. + /// + /// Every test that makes use of the `FrameAllocator` must call this function, + /// and drop its return value when it is finished. + pub fn init() -> FrameAllocatorInitialized { + let mut allocator = &FRAME_ALLOCATOR; + assert_eq!(allocator.initialized.load(Ordering::SeqCst), false, "frame_allocator::init() was called twice"); + + // make it all available + allocator.mark_area_free(0, ALL_MEMORY); + + // reserve one frame, in the middle, just for fun + allocator.mark_area_reserved(PAGE_SIZE * 3, PAGE_SIZE * 3 + 1); + + allocator.initialized.store(true, Ordering::SeqCst); + + FrameAllocatorInitialized(()) + } + + /// Because tests are run in the same binary, a test might forget to re-initialize the frame allocator, + /// which will cause it to run on the previous test's frame allocator state. /// - /// These only provide an internal API for [PhysicalMemRegion]s. - pub trait FrameAllocatorTraitPrivate { - /// Marks a region as deallocated. - /// Called when a PhysicalMemRegion is dropped. - /// - /// # Panic - /// - /// Panics if the region was not known as allocated - fn free_region(region: &PhysicalMemRegion); + /// We prevent that by returning a special structure that every test must keep in its scope. + /// When the test finishes, it is dropped, and it automatically marks the frame allocator uninitialized again. 
+ #[must_use] + pub struct FrameAllocatorInitialized(()); + + impl ::core::ops::Drop for FrameAllocatorInitialized { + fn drop(&mut self) { FRAME_ALLOCATOR.initialized.store(false, Ordering::SeqCst); } + } + + /// The way you usually use it. + #[test] + #[ignore] + fn ok() { + let _f = crate::frame_allocator::init(); + + let a = FrameAllocator::allocate_frame().unwrap(); + let b = FrameAllocator::allocate_region(2 * PAGE_SIZE).unwrap(); + let c_vec = FrameAllocator::allocate_frames_fragmented(3 * PAGE_SIZE).unwrap(); + + drop(a); + drop(b); + drop(c_vec); + } + + + #[test] + #[ignore] + fn fragmented() { + let _f = crate::frame_allocator::init(); + // make it all available + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_free(0, ALL_MEMORY); + + // reserve some frames in the middle + allocator.mark_area_reserved(2 * PAGE_SIZE, 7 * PAGE_SIZE); + drop(allocator); + + // force a fragmented allocation + let frames = FrameAllocator::allocate_frames_fragmented(5 * PAGE_SIZE).unwrap(); + + assert_eq!(frames.len(), 2); + assert_eq!(frames[0].address(), PhysicalAddress(0x00000000)); + assert_eq!(frames[0].size(), 2 * PAGE_SIZE); + assert_eq!(frames[1].address(), PhysicalAddress(7 * PAGE_SIZE)); + assert_eq!(frames[1].size(), 3 * PAGE_SIZE); + } + + /// You can't give it a size of 0. + #[test] + fn zero() { + let _f = crate::frame_allocator::init(); + FrameAllocator::allocate_region(0).unwrap_err(); + FrameAllocator::allocate_frames_fragmented(0).unwrap_err(); + } + + #[test] #[should_panic] fn no_init_frame() { let _ = FrameAllocator::allocate_frame(); } + #[test] #[should_panic] fn no_init_region() { let _ = FrameAllocator::allocate_region(PAGE_SIZE); } + #[test] #[should_panic] fn no_init_fragmented() { let _ = FrameAllocator::allocate_frames_fragmented(PAGE_SIZE); } + + /// Allocation fails if Out Of Memory. + #[test] + fn physical_oom_frame() { + let _f = crate::frame_allocator::init(); + // make it all reserved + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_reserved(0, ALL_MEMORY); + drop(allocator); + + match FrameAllocator::allocate_frame() { + Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + } + + #[test] + fn physical_oom_frame_threshold() { + let _f = crate::frame_allocator::init(); + // make it all reserved + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_reserved(0, ALL_MEMORY); + // leave only the last frame + allocator.mark_area_free(ALL_MEMORY - PAGE_SIZE, ALL_MEMORY); + drop(allocator); + + FrameAllocator::allocate_frame().unwrap(); + } + + #[test] + fn physical_oom_region() { + let _f = crate::frame_allocator::init(); + // make it all reserved + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_reserved(0, ALL_MEMORY); + // leave only the last 3 frames + allocator.mark_area_free(ALL_MEMORY - 3 * PAGE_SIZE, + ALL_MEMORY); + drop(allocator); + + match FrameAllocator::allocate_region(4 * PAGE_SIZE) { + Err(KernelError::PhysicalMemoryExhaustion { .. 
}) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + } + + #[test] + fn physical_oom_region_threshold() { + let _f = crate::frame_allocator::init(); + // make it all reserved + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_reserved(0, ALL_MEMORY); + // leave only the last 3 frames + allocator.mark_area_free(ALL_MEMORY - 3 * PAGE_SIZE, + ALL_MEMORY); + drop(allocator); + + FrameAllocator::allocate_region(3 * PAGE_SIZE).unwrap(); + } + + #[test] + fn physical_oom_fragmented() { + let _f = crate::frame_allocator::init(); + // make it all available + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_free(0, ALL_MEMORY); + drop(allocator); + + match FrameAllocator::allocate_frames_fragmented(ALL_MEMORY + PAGE_SIZE) { + Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + } + + #[test] + #[ignore] + fn physical_oom_threshold_fragmented() { + let _f = crate::frame_allocator::init(); + // make it all available + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_free(0, ALL_MEMORY); + drop(allocator); + + FrameAllocator::allocate_frames_fragmented(ALL_MEMORY).unwrap(); + } + + #[test] + #[ignore] + fn allocate_last_frame() { + let _f = crate::frame_allocator::init(); + // make it all available + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_free(0, ALL_MEMORY); + + // reserve all but last frame + allocator.mark_area_reserved(0, ALL_MEMORY - PAGE_SIZE); + drop(allocator); + + // check with allocate_frame + let frame = FrameAllocator::allocate_frame().unwrap(); + drop(frame); + + // check with allocate_region + let frame = FrameAllocator::allocate_region(PAGE_SIZE).unwrap(); + drop(frame); + + // check with allocate_frames_fragmented + let frame = FrameAllocator::allocate_frames_fragmented(PAGE_SIZE).unwrap(); + drop(frame); + + // check we had really allocated *all* of it + let frame = FrameAllocator::allocate_frame().unwrap(); + match FrameAllocator::allocate_frame() { + Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + }; + drop(frame); + } + + #[test] + fn oom_hard() { + let _f = crate::frame_allocator::init(); + // make it all reserved + let mut allocator = &FRAME_ALLOCATOR; + allocator.mark_area_reserved(0, ALL_MEMORY); + + // free only 1 frame in the middle + allocator.mark_area_free(2 * PAGE_SIZE, 3 * PAGE_SIZE); + drop(allocator); + + // check with allocate_region + match FrameAllocator::allocate_region(2 * PAGE_SIZE) { + Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + + // check with allocate_frames_fragmented + match FrameAllocator::allocate_frames_fragmented(2 * PAGE_SIZE) { + Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + + // check we can still take only one frame + let frame = FrameAllocator::allocate_frame().unwrap(); + match FrameAllocator::allocate_frame() { + Err(KernelError::PhysicalMemoryExhaustion { .. }) => (), + unexpected_err => panic!("test failed: {:#?}", unexpected_err) + } + drop(frame); + } + + /// This test checks that the frames marked allocated by [allocate_frames_fragmented] + /// are marked free again when the function fails.
+    /// +    /// The function has an optimisation that checks at every point whether the requested length is +    /// still obtainable; otherwise it won't even bother marking the frames, and fails directly. +    /// +    /// But we **do** want to mark the frames allocated, so our check has to be smart and work +    /// around this optimization. +    /// +    /// We do this by allocating the end of the bitmap, so [allocate_frames_fragmented] will +    /// realize it's going to fail only by the time it's half way through, +    /// and some frames will have been marked allocated. +    #[test] +    #[ignore] +    fn physical_oom_doesnt_leak() { +        let _f = crate::frame_allocator::init(); +        // make it all available +        let mut allocator = &FRAME_ALLOCATOR; +        allocator.mark_area_free(0, ALL_MEMORY); +        drop(allocator); + +        // allocate it all +        let half_left = FrameAllocator::allocate_region(ALL_MEMORY / 2).unwrap(); +        let half_right = FrameAllocator::allocate_region(ALL_MEMORY / 2).unwrap(); + +        // check we have really allocated *all* of it +        match FrameAllocator::allocate_frame() { +            Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), +            unexpected_err => panic!("test failed: {:#?}", unexpected_err) +        }; + +        // free only the left half +        drop(half_left); + +        // attempt to allocate more than the available half +        match FrameAllocator::allocate_frames_fragmented(ALL_MEMORY / 2 + PAGE_SIZE) { +            Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), +            unexpected_err => panic!("test failed: {:#?}", unexpected_err) +        }; + +        // we should be able to still allocate after an oom recovery. +        let half_left = FrameAllocator::allocate_frames_fragmented(ALL_MEMORY / 2).unwrap(); -        /// Checks if a region is marked allocated. -        fn check_is_allocated(address: PhysicalAddress, length: usize) -> bool; +        // and now memory is fully allocated again +        match FrameAllocator::allocate_frame() { +            Err(KernelError::PhysicalMemoryExhaustion {..} ) => (), +            unexpected_err => panic!("test failed: {:#?}", unexpected_err) +        }; -        /// Checks if a region is marked reserved. -        fn check_is_reserved(region: PhysicalAddress, length: usize) -> bool; +        drop(half_left); +        drop(half_right); } } diff --git a/kernel/src/frame_allocator/physical_mem_region.rs b/kernel/src/frame_allocator/physical_mem_region.rs index 4fb84492e..7ee95b279 100644 --- a/kernel/src/frame_allocator/physical_mem_region.rs +++ b/kernel/src/frame_allocator/physical_mem_region.rs @@ -2,7 +2,7 @@ //! //! A [PhysicalMemRegion] is a span of consecutive physical frames.
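Before the diff moves on, it is worth making the ownership story concrete: `PhysicalMemRegion` acts as an RAII guard for its frames. A minimal sketch (illustrative; it only uses calls that appear elsewhere in this patch):

```rust
// Allocate two physically consecutive frames.
let region = FrameAllocator::allocate_region(2 * PAGE_SIZE).unwrap();
assert_eq!(region.size(), 2 * PAGE_SIZE);
// The region owns its frames (`should_free_on_drop: true`), so dropping it
// stores FRAME_FREE back into the corresponding bits of the allocator's bitmap.
drop(region);
```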
-use super::{FrameAllocator, FrameAllocatorTraitPrivate}; +use super::FrameAllocator; use crate::paging::PAGE_SIZE; use crate::mem::PhysicalAddress; use crate::utils::{align_down, div_ceil, check_aligned, Splittable}; @@ -209,11 +209,12 @@ impl Splittable for Vec<PhysicalMemRegion> { #[cfg(test)] mod test { -    use super::super::{FrameAllocator, FrameAllocatorTrait}; +    use super::super::{FrameAllocator, FRAME_ALLOCATOR}; use super::{PhysicalMemRegion, PhysicalMemRegionIter}; use crate::utils::Splittable; use crate::mem::PhysicalAddress; use crate::paging::PAGE_SIZE; +    use core::sync::atomic::Ordering; #[test] #[should_panic] @@ -226,8 +227,7 @@ mod test { fn on_fixed_mmio_rounds_unaligned() { let _f = crate::frame_allocator::init(); // reserve them so we don't panic -        crate::frame_allocator::mark_frame_bootstrap_allocated(PhysicalAddress(0)); -        crate::frame_allocator::mark_frame_bootstrap_allocated(PhysicalAddress(PAGE_SIZE)); +        FRAME_ALLOCATOR.mark_area_reserved(0, 0x1FFF); let region = unsafe { PhysicalMemRegion::on_fixed_mmio(PhysicalAddress(0x00000007), PAGE_SIZE + 1) }; assert_eq!(region.start_addr, 0); diff --git a/kernel/src/heap_allocator.rs b/kernel/src/heap_allocator.rs index 5b29cbd0a..91db5f01a 100644 --- a/kernel/src/heap_allocator.rs +++ b/kernel/src/heap_allocator.rs @@ -8,7 +8,7 @@ use core::ops::Deref; use core::ptr::NonNull; use linked_list_allocator::{Heap, align_up}; use crate::paging::{PAGE_SIZE, MappingAccessRights, kernel_memory::get_kernel_memory}; -use crate::frame_allocator::{FrameAllocator, FrameAllocatorTrait}; +use crate::frame_allocator::FrameAllocator; use crate::mem::VirtualAddress; /// Simple wrapper around linked_list_allocator, growing heap by allocating pages diff --git a/kernel/src/main.rs b/kernel/src/main.rs index 90db50c41..6cf8e2603 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -6,7 +6,7 @@ //! Currently doesn't do much, besides booting and printing Hello World on the //! screen. But hey, that's a start. -#![feature(lang_items, start, asm, global_asm, compiler_builtins_lib, naked_functions, core_intrinsics, const_fn, abi_x86_interrupt, allocator_api, alloc, box_syntax, no_more_cas, const_vec_new, range_contains, step_trait, thread_local, nll)] +#![feature(lang_items, start, asm, global_asm, compiler_builtins_lib, naked_functions, core_intrinsics, const_fn, abi_x86_interrupt, allocator_api, alloc, box_syntax, no_more_cas, const_vec_new, range_contains, step_trait, thread_local, nll, untagged_unions, maybe_uninit, const_fn_union)] #![no_std] #![cfg_attr(target_os = "none", no_main)] #![recursion_limit = "1024"] diff --git a/kernel/src/mem.rs b/kernel/src/mem.rs index 75ef78475..bef938985 100644 --- a/kernel/src/mem.rs +++ b/kernel/src/mem.rs @@ -176,6 +176,12 @@ impl VirtualAddress { /// Rounds up PAGE_SIZE. pub fn ceil(self) -> VirtualAddress { VirtualAddress(round_to_page_upper(self.0)) } + +    /// Wrapping (modular) addition. Computes self + rhs, wrapping around at the boundary of the type. +    pub fn wrapping_add(self, rhs: usize) -> VirtualAddress { VirtualAddress(self.0.wrapping_add(rhs)) } + +    /// Wrapping (modular) subtraction. Computes self - rhs, wrapping around at the boundary of the type.
+ pub fn wrapping_sub(self, rhs: usize) -> VirtualAddress { VirtualAddress(self.0.wrapping_sub(rhs)) } } impl core::iter::Step for PhysicalAddress { diff --git a/kernel/src/paging/arch/i386/table.rs b/kernel/src/paging/arch/i386/table.rs index 8105e7603..f9f16d17d 100644 --- a/kernel/src/paging/arch/i386/table.rs +++ b/kernel/src/paging/arch/i386/table.rs @@ -10,7 +10,7 @@ use super::super::super::lands::{KernelLand, UserLand, VirtualSpaceLand}; use super::super::super::kernel_memory::get_kernel_memory; use super::super::super::MappingAccessRights; use crate::mem::{VirtualAddress, PhysicalAddress}; -use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator, FrameAllocatorTrait}; +use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator}; use core::fmt::{Debug, Formatter, Error}; /// When paging is on, accessing this address loops back to the directory itself thanks to diff --git a/kernel/src/paging/mapping.rs b/kernel/src/paging/mapping.rs index 9b276e50d..46b979bc4 100644 --- a/kernel/src/paging/mapping.rs +++ b/kernel/src/paging/mapping.rs @@ -218,7 +218,7 @@ mod test { use super::MappingType; use crate::mem::{VirtualAddress, PhysicalAddress}; use crate::paging::PAGE_SIZE; - use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator, FrameAllocatorTrait}; + use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator}; use std::sync::Arc; use std::vec::Vec; use crate::utils::Splittable; diff --git a/kernel/src/paging/process_memory.rs b/kernel/src/paging/process_memory.rs index b3a775d1c..a7421209b 100644 --- a/kernel/src/paging/process_memory.rs +++ b/kernel/src/paging/process_memory.rs @@ -26,7 +26,7 @@ use super::cross_process::CrossProcessMapping; use super::error::MmError; use super::MappingAccessRights; use crate::mem::{VirtualAddress, PhysicalAddress}; -use crate::frame_allocator::{FrameAllocator, FrameAllocatorTrait, PhysicalMemRegion}; +use crate::frame_allocator::{FrameAllocator, PhysicalMemRegion}; use crate::paging::arch::Entry; use crate::error::KernelError; use crate::utils::{check_aligned, check_nonzero_length}; diff --git a/kernel/src/syscalls.rs b/kernel/src/syscalls.rs index e595487c3..e1b6df6f5 100644 --- a/kernel/src/syscalls.rs +++ b/kernel/src/syscalls.rs @@ -3,7 +3,7 @@ use crate::mem::{VirtualAddress, PhysicalAddress}; use crate::mem::{UserSpacePtr, UserSpacePtrMut}; use crate::paging::{MappingAccessRights, mapping::MappingType}; -use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator, FrameAllocatorTrait}; +use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator}; use crate::process::{Handle, ThreadStruct, ProcessStruct}; use crate::event::{self, Waitable}; use crate::scheduler::{self, get_current_thread, get_current_process}; diff --git a/kernel/src/utils.rs b/kernel/src/utils.rs index 22c83f63f..fc19a9808 100644 --- a/kernel/src/utils.rs +++ b/kernel/src/utils.rs @@ -7,7 +7,9 @@ use crate::error::KernelError; use crate::scheduler; use crate::sync::SpinLockIRQ; use crate::process::ThreadState; -use core::sync::atomic::Ordering; +use core::sync::atomic::{AtomicUsize, Ordering}; +use core::ops::{RangeBounds, Bound}; +use bit_field::BitField; /// A trait for things that can be splitted in two parts pub trait Splittable where Self: Sized { @@ -54,3 +56,276 @@ pub fn check_thread_killed() { } } } + +/// Provides an abstraction over an Atomic bitmap. +pub trait AtomicBitmap { + /// Returns the number of bits this bitmap contains. + fn bit_len(&self) -> usize; + /// Returns an iterator over each bit in the bitmap. 
+    /// +    /// The bits may change while iterating! +    fn bit_iter(&self) -> BitIterator<'_, Self>; +    /// Obtains the bit at the index `bit`; note that index 0 is the least +    /// significant bit, while index `length() - 1` is the most significant bit. +    /// +    /// `load_bit` takes an [Ordering] argument which describes the memory +    /// ordering of this operation. Possible values are [SeqCst](Ordering::SeqCst), +    /// [Acquire](Ordering::Acquire) and [Relaxed](Ordering::Relaxed). +    /// +    /// # Panics +    /// +    /// Panics if order is [Release](Ordering::Release) or +    /// [AcqRel](Ordering::AcqRel). +    fn load_bit(&self, index: usize, order: Ordering) -> bool; +    /// Sets the bit at the index `bit` to the value `val` (where true means a +    /// value of '1' and false means a value of '0'); note that index 0 is the +    /// least significant bit, while index `length() - 1` is the most significant +    /// bit. +    /// +    /// `store_bit` takes an [Ordering] argument which describes the memory +    /// ordering of this operation. Possible values are [SeqCst](Ordering::SeqCst), +    /// [Release](Ordering::Release) and [Relaxed](Ordering::Relaxed). +    /// +    /// # Panics +    /// +    /// Panics if order is [Acquire](Ordering::Acquire) or +    /// [AcqRel](Ordering::AcqRel). +    fn store_bit(&self, index: usize, val: bool, order: Ordering); +    /// Stores a bit into the atomic bitmap if the current value of that bit is +    /// the same as the `current` value. The other bits are unchanged. Note that +    /// index 0 is the least significant bit, while index `length() - 1` is the +    /// most significant bit. +    /// +    /// On success, returns `Ok` containing the new value of the bit. On failure, +    /// returns `Err` containing the actual previous value of the bit. +    /// +    /// `compare_and_swap` also takes an [Ordering] argument which describes the +    /// memory ordering of this operation. Notice that even when using [AcqRel], +    /// the operation might fail and hence just perform an [Acquire] load, but +    /// not have [Release] semantics. Using [Acquire] makes the store part of +    /// this operation [Relaxed] if it happens, and using [Release] makes the +    /// load part [Relaxed]. +    /// +    /// [Acquire]: Ordering::Acquire +    /// [Relaxed]: Ordering::Relaxed +    /// [Release]: Ordering::Release +    /// [AcqRel]: Ordering::AcqRel +    fn compare_and_swap(&self, index: usize, current: bool, new: bool, order: Ordering) -> Result<bool, bool>; +    /// Finds `count` consecutive bits in the atomic bitmap that are of value +    /// `!val`, and atomically sets them to `val` (where true means a value of +    /// '1' and false means a value of '0'). +    /// +    /// The return value is the index of the least significant bit that changed, +    /// or [None] if the bitmap didn't contain enough bits of the right value. +    fn set_n_bits(&self, count: usize, val: bool) -> Option<usize>; +    /// Sets the bits in `range` in the atomic bitmap to value `val` (where true +    /// means a value of '1' and false means a value of '0'); note that index 0 +    /// is the least significant bit, while index `length() - 1` is the most +    /// significant bit. +    /// +    /// # Atomicity +    /// +    /// Those bits are individually set atomically, but they might not all appear +    /// to be set all at once. +    fn store_bits_nonatomic<T: RangeBounds<usize>>(&self, range: T, val: bool); +} + +/// A cell in a bitmap array. +pub trait BitmapCell { +    /// The number of bits this cell contains. +    fn bit_capacity() -> usize; +} + +impl BitmapCell for AtomicUsize { +    fn bit_capacity() -> usize { +        core::mem::size_of::<usize>() * 8 +    } +} + +/// An iterator over bits in a Bitmap, returned by [AtomicBitmap::bit_iter].
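+/// (Illustrative use, not from the patch: `bitmap.bit_iter().filter(|bit| *bit).count()`
+/// counts the bits currently set to '1', which in the frame allocator's convention is the
+/// number of free frames.)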
+#[derive(Debug)] +pub struct BitIterator<'a, T: ?Sized + AtomicBitmap>(&'a T, usize); + +impl<'a, T: ?Sized + AtomicBitmap> Iterator for BitIterator<'a, T> { +    type Item = bool; +    fn next(&mut self) -> Option<bool> { +        if self.1 < self.0.bit_len() { +            let val = self.0.load_bit(self.1, Ordering::SeqCst); +            self.1 += 1; +            Some(val) +        } else { +            None +        } +    } +} + + +impl AtomicBitmap for AtomicUsize { +    fn bit_len(&self) -> usize { +        Self::bit_capacity() +    } +    fn load_bit(&self, index: usize, order: Ordering) -> bool { +        assert!(index < 8 * core::mem::size_of::<usize>()); +        self.load(order).get_bit(index) +    } +    fn store_bit(&self, index: usize, val: bool, order: Ordering) { +        assert!(index < 8 * core::mem::size_of::<usize>()); +        // We first calculate a mask to use with `fetch_or`/`fetch_and`. +        // The mask must have the bit set regardless of `val`. +        let mut mask = 0; +        mask.set_bit(index, true); +        if val { +            self.fetch_or(mask, order); +        } else { +            self.fetch_and(!mask, order); +        } +    } +    fn store_bits_nonatomic<T: RangeBounds<usize>>(&self, range: T, val: bool) { +        let start = match range.start_bound() { +            Bound::Unbounded => 0, +            Bound::Included(b) => *b, +            Bound::Excluded(_b) => unreachable!("Excluded in start"), +        }; +        let end = match range.end_bound() { +            Bound::Unbounded => 8 * core::mem::size_of::<usize>(), +            Bound::Included(b) => *b + 1, +            Bound::Excluded(b) => *b, +        }; +        assert!(start < 8 * core::mem::size_of::<usize>()); +        assert!(end <= 8 * core::mem::size_of::<usize>()); +        let mut mask = 0; +        mask.set_bits_area(start..end, true); +        if val { +            self.fetch_or(mask, Ordering::SeqCst); +        } else { +            self.fetch_and(!mask, Ordering::SeqCst); +        } +    } +    fn compare_and_swap(&self, index: usize, current: bool, new: bool, order: Ordering) -> Result<bool, bool> { +        assert!(index < 8 * core::mem::size_of::<usize>()); +        // This cell stores multiple bits, but we can only compare/swap on the whole cell at once, +        // so it's possible for compare/swap to fail because a different bit in the cell has been +        // modified by another thread. In such a case, continue trying to compare/swap until either +        // we are successful or another thread modifies the specified bit before we do. +        let mut cur_cell_val = self.load(Ordering::Acquire); +        loop { +            // Load the current cell value, and stop early if the bit we're trying to set has +            // already been changed on another thread +            let cur_val = cur_cell_val.get_bit(index); +            if cur_val != current { +                return Err(cur_val); +            } + +            // Decide what the new cell value should be after setting/unsetting the specified bit +            let mut new_cell_val = cur_cell_val; +            new_cell_val.set_bit(index, new); + +            // Try to swap in the new cell value. If successful, we can signal success. Otherwise, +            // check whether the failure was because the targeted bit was flipped by another thread. +            // If so, then stop early and indicate failure. Otherwise, try again. +            match self.compare_exchange(cur_cell_val, new_cell_val, order, Ordering::Acquire) { +                Ok(_current) => return Ok(new), +                Err(oldval) => cur_cell_val = oldval, +            } +        } +    } +    fn set_n_bits(&self, count: usize, val: bool) -> Option<usize> { +        assert!(count < 8 * core::mem::size_of::<usize>()); +        let mut set_idx = None; + +        // Use fetch_update to avoid writing our own CAS loop.
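+        // (Descriptive note on the scan below: the closure receives a snapshot `old` of the
+        // cell. It walks the bits, counting a run of consecutive bits that differ from `val`
+        // and remembering the run's start in `set_idx`. If the run reaches `count`, it returns
+        // `Some(new)` with the whole run flipped to `val`; `fetch_update` then tries to CAS
+        // that in, re-running the closure if another thread touched the cell in the meantime.
+        // Returning `None` makes `fetch_update` fail, i.e. no suitable run exists in this cell.)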
+        let res = self.fetch_update(|old| { +            set_idx = None; +            let mut curcount = 0; +            for offset in 0..Self::bit_capacity() { +                if old.get_bit(offset) != val { +                    let firstoff = *set_idx.get_or_insert(offset); +                    curcount += 1; +                    if curcount == count { +                        let mut new = old; +                        new.set_bits_area(firstoff..=offset, val); +                        return Some(new) +                    } +                } else { +                    curcount = 0; +                    set_idx = None; +                } +            } +            None +        }, Ordering::SeqCst, Ordering::SeqCst); + +        res +            .ok() +            .map(|_| set_idx.expect("fetch_update cannot succeed without setting set_idx")) +    } + +    fn bit_iter(&self) -> BitIterator<'_, Self> { +        BitIterator(self, 0) +    } +} + +impl<T: AtomicBitmap + BitmapCell> AtomicBitmap for [T] { +    fn bit_len(&self) -> usize { +        T::bit_capacity() * self.len() +    } +    fn load_bit(&self, index: usize, order: Ordering) -> bool { +        self[index / T::bit_capacity()].load_bit(index % T::bit_capacity(), order) +    } + +    fn store_bit(&self, index: usize, val: bool, order: Ordering) { +        self[index / T::bit_capacity()].store_bit(index % T::bit_capacity(), val, order) +    } + +    fn compare_and_swap(&self, index: usize, current: bool, new: bool, order: Ordering) -> Result<bool, bool> { +        self[index / T::bit_capacity()].compare_and_swap(index % T::bit_capacity(), current, new, order) +    } + +    fn store_bits_nonatomic<U: RangeBounds<usize>>(&self, range: U, val: bool) { +        let start_bit = match range.start_bound() { +            Bound::Unbounded => 0, +            Bound::Included(b) => *b, +            Bound::Excluded(_) => unreachable!("Got excluded bound in start"), +        }; + +        let start_cell = start_bit / T::bit_capacity(); + +        let end_bit_included = match range.end_bound() { +            Bound::Unbounded => self.bit_len() - 1, +            Bound::Included(b) => *b, +            // If 0 is excluded, then the range is empty. +            Bound::Excluded(0) => return, +            Bound::Excluded(b) => *b - 1, +        }; + +        let end_cell_included = end_bit_included / T::bit_capacity(); + +        for (idx, item) in self.iter().enumerate() +            .skip(start_cell) +            .take_while(|(idx, _)| *idx <= end_cell_included) +        { +            let range_start = if start_cell == idx { +                start_bit % T::bit_capacity() +            } else { +                0 +            }; +            let range_end = if end_cell_included == idx { +                (end_bit_included % T::bit_capacity()) + 1 +            } else { +                T::bit_capacity() +            }; +            item.store_bits_nonatomic(range_start..range_end, val); +        } +    } + +    fn set_n_bits(&self, count: usize, val: bool) -> Option<usize> { +        for (idx, i) in self.iter().enumerate() { +            if let Some(i_idx) = i.set_n_bits(count, val) { +                return Some(idx * T::bit_capacity() + i_idx) +            } +        } +        None +    } + +    fn bit_iter(&self) -> BitIterator<'_, Self> { +        BitIterator(self, 0) +    } +} From 1e1c48167e945140685e4a9936d3c9e413b9f180 Mon Sep 17 00:00:00 2001 From: roblabla Date: Wed, 20 Feb 2019 22:58:06 +0000 Subject: [PATCH 15/15] Temporary commit --- Cargo.lock | 24 ++++++++++++++++++ isofiles/boot/grub/grub.cfg | 2 +- kernel/Cargo.toml | 9 +++++++ kernel/src/arch/i386/interrupts/irq.rs | 6 ----- kernel/src/arch/i386/interrupts/mod.rs | 9 +++++++ kernel/src/arch/i386/mod.rs | 19 ++++++++++++++ kernel/src/arch/i386/stack.rs | 2 +- kernel/src/arch/mod.rs | 13 +++++++++- kernel/src/arch/stub/mod.rs | 8 ++++++ kernel/src/devices/pic.rs | 34 +++++++++++++++++--------- kernel/src/devices/pit.rs | 32 +++++++++++------------- kernel/src/devices/rs232.rs | 10 +++++++- kernel/src/heap_allocator.rs | 5 ++++ kernel/src/log_impl/mod.rs | 2 +- kernel/src/main.rs | 4 ++- kernel/src/scheduler.rs | 5 ++-- 16 files changed, 139 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 153248a1c..59c26dc4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -75,6 +75,14 @@ name = "gcc"
version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "generic-array" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "gif" version = "0.10.0" @@ -93,6 +101,11 @@ dependencies = [ "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "intrusive-collections" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "kfs-ahci" version = "0.1.0" @@ -131,13 +144,16 @@ dependencies = [ name = "kfs-kernel" version = "0.1.0" dependencies = [ + "arrayvec 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "bitfield 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "generic-array 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "hashbrown 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "intrusive-collections 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", "kfs-libkern 0.1.0", "kfs-libutils 0.1.0", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -378,6 +394,11 @@ dependencies = [ "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "typenum" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "unicode-xid" version = "0.1.0" @@ -422,8 +443,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1" "checksum font-rs 0.1.3 (git+https://github.com/orycterope/font-rs)" = "" "checksum gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)" = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" +"checksum generic-array 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3c0f28c2f5bfb5960175af447a2da7c18900693738343dc896ffbcabd9839592" "checksum gif 0.10.0 (git+https://github.com/roblabla/image-gif)" = "" "checksum hashbrown 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da" +"checksum intrusive-collections 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "f0207c3d23d0b13d569d4103a98f31c4cd65f30c92c3a157272966b2affd177e" "checksum lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a374c89b9db55895453a74c1e38861d9deec0b01b405a82516e9d5de4820dea1" "checksum linked_list_allocator 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "655d57c71827fe0891ce72231b6aa5e14033dae3f604609e6a6f807267c1678d" "checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6" @@ -446,6 +469,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum static_assertions 0.3.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "389ce475f424f267dbed6479cbd8f126c5e1afb053b0acdaa019c74305fc65d1" "checksum syn 0.15.26 (registry+https://github.com/rust-lang/crates.io-index)" = "f92e629aa1d9c827b2bb8297046c1ccffc57c99b947a680d3ccff1f136a3bee9" "checksum synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "73687139bf99285483c96ac0add482c3776528beac1d97d444f6e91f203a2015" +"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169" "checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" "checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" diff --git a/isofiles/boot/grub/grub.cfg b/isofiles/boot/grub/grub.cfg index a56fc67fa..2e793c7b1 100644 --- a/isofiles/boot/grub/grub.cfg +++ b/isofiles/boot/grub/grub.cfg @@ -1,4 +1,4 @@ -set timeout=10 +set timeout=0 set timeout_style=countdown set default=0 diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index 436deb4b0..165908046 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -33,6 +33,15 @@ failure = { version = "0.1", default-features = false, features = ["derive"] } bitfield = "0.13" mashup = "0.1.9" cfg-if = "0.1" +generic-array = "0.12" + +[dependencies.intrusive-collections] +default-features = false +version = "0.7" + +[dependencies.arrayvec] +default-features = false +version = "0.4.10" [dependencies.smallvec] default-features = false diff --git a/kernel/src/arch/i386/interrupts/irq.rs b/kernel/src/arch/i386/interrupts/irq.rs index 866476299..7b9720a6d 100644 --- a/kernel/src/arch/i386/interrupts/irq.rs +++ b/kernel/src/arch/i386/interrupts/irq.rs @@ -10,12 +10,6 @@ use crate::arch::i386::structures::idt::ExceptionStackFrame; use crate::devices::pic; -#[allow(clippy::missing_docs_in_private_items)] -extern "x86-interrupt" fn timer_handler(_stack_frame: &mut ExceptionStackFrame) { - // TODO: Feed the timer handler into a kernel preemption handler. - pic::get().acknowledge(0); -} - macro_rules! 
irq_handler { ($irq:expr, $name:ident) => {{ #[allow(clippy::missing_docs_in_private_items)] diff --git a/kernel/src/arch/i386/interrupts/mod.rs b/kernel/src/arch/i386/interrupts/mod.rs index 0fbb06fae..1242b1ed3 100644 --- a/kernel/src/arch/i386/interrupts/mod.rs +++ b/kernel/src/arch/i386/interrupts/mod.rs @@ -412,10 +412,13 @@ pub unsafe fn init() { pic::init(); { + info!("Getting page"); let page = get_kernel_memory().get_page(); let idt = page.addr() as *mut u8 as *mut Idt; unsafe { + info!("Initializing page {:?}", page); (*idt).init(); + info!("Setting exceptions {:p}", idt); (*idt).divide_by_zero.set_handler_fn(divide_by_zero_handler); (*idt).debug.set_handler_fn(debug_handler); (*idt).non_maskable_interrupt.set_handler_fn(non_maskable_interrupt_handler); @@ -424,7 +427,9 @@ pub unsafe fn init() { (*idt).bound_range_exceeded.set_handler_fn(bound_range_exceeded_handler); (*idt).invalid_opcode.set_handler_fn(invalid_opcode_handler); (*idt).device_not_available.set_handler_fn(device_not_available_handler); + info!("Setting double fault handler {:p}", idt); (*idt).double_fault.set_handler_task_gate_addr(double_fault_handler as u32); + info!("Setting rest of exceptions {:p}", idt); // coprocessor_segment_overrun (*idt).invalid_tss.set_handler_fn(invalid_tss_handler); (*idt).segment_not_present.set_handler_fn(segment_not_present_handler); @@ -438,17 +443,21 @@ pub unsafe fn init() { (*idt).virtualization.set_handler_fn(virtualization_handler); (*idt).security_exception.set_handler_fn(security_exception_handler); + info!("Setting IRQ handlers"); for (i, handler) in irq::IRQ_HANDLERS.iter().enumerate() { (*idt).interrupts[i].set_handler_fn(*handler); } // Add entry for syscalls + info!("Setting syscall handler"); let syscall_int = (*idt)[0x80].set_interrupt_gate_addr(syscall_handler as u32); syscall_int.set_privilege_level(PrivilegeLevel::Ring3); syscall_int.disable_interrupts(false); } + info!("Setting IDT global"); let mut lock = IDT.lock(); *lock = Some(page); + info!("Loading IDT"); (*idt).load(); } diff --git a/kernel/src/arch/i386/mod.rs b/kernel/src/arch/i386/mod.rs index 584394453..dce317cf6 100644 --- a/kernel/src/arch/i386/mod.rs +++ b/kernel/src/arch/i386/mod.rs @@ -586,5 +586,24 @@ pub fn get_modules() -> impl Iterator { .flatten() } +/// See [arch::stub::wait_for_interrupt] +pub fn wait_for_interrupt() { + unsafe { + instructions::interrupts::hlt() + } +} + +pub fn get_available_memory_regions() -> impl Iterator { + let boot_info = multiboot::get_boot_information(); + let memory_map_tag = boot_info.memory_map_tag() + .expect("GRUB, you're drunk. Give us our memory_map_tag."); + + memory_map_tag.memory_areas() + .map(|v| super::MemRegion { + addr: v.start_address(), + page_count: v.end_address() - v.start_address() / 4096 + }) +} + pub use self::process_switch::{ThreadHardwareContext, process_switch, prepare_for_first_schedule}; pub use self::stack::{KernelStack, StackDumpSource, dump_stack}; diff --git a/kernel/src/arch/i386/stack.rs b/kernel/src/arch/i386/stack.rs index a0b9232b3..b109d2213 100644 --- a/kernel/src/arch/i386/stack.rs +++ b/kernel/src/arch/i386/stack.rs @@ -122,7 +122,7 @@ impl KernelStack { /// Puts two poisons pointers at the base of the stack for the `saved ebp` and `saved eip`. 
diff --git a/kernel/src/arch/i386/stack.rs b/kernel/src/arch/i386/stack.rs
index a0b9232b3..b109d2213 100644
--- a/kernel/src/arch/i386/stack.rs
+++ b/kernel/src/arch/i386/stack.rs
@@ -122,7 +122,7 @@ impl KernelStack {
     /// Puts two poison pointers at the base of the stack for the `saved ebp` and `saved eip`.
     unsafe fn create_poison_pointers(&mut self) {
         let saved_eip: *mut usize = (self.stack_address.addr() + STACK_SIZE_WITH_GUARD * PAGE_SIZE
-                                        - size_of::<usize>()
+                                     - size_of::<usize>()
         ) as *mut usize;
         let saved_ebp: *mut usize = saved_eip.offset(-1);
         *saved_eip = 0x00000000;
diff --git a/kernel/src/arch/mod.rs b/kernel/src/arch/mod.rs
index 934654e1a..1623a75c1 100644
--- a/kernel/src/arch/mod.rs
+++ b/kernel/src/arch/mod.rs
@@ -28,5 +28,18 @@ pub use self::arch::{
     KernelStack,
     StackDumpSource,
     dump_stack,
-    get_modules
+    get_modules,
+
+    wait_for_interrupt,
+
+    get_available_memory_regions,
 };
+
+/// A region of memory the bootloader reports as available RAM.
+#[derive(Debug)]
+pub struct MemRegion {
+    /// Physical address of the start of the region.
+    pub addr: u64,
+    /// Number of 4096-byte pages the region spans.
+    pub page_count: u64
+}
diff --git a/kernel/src/arch/stub/mod.rs b/kernel/src/arch/stub/mod.rs
index 9b9a8d27a..8dd89c38d 100644
--- a/kernel/src/arch/stub/mod.rs
+++ b/kernel/src/arch/stub/mod.rs
@@ -166,3 +166,12 @@ pub struct StackDumpSource;
 /// is stopped and will remain unscheduled at least until this function returns.
 pub unsafe fn dump_stack<'a>(_source: &StackDumpSource, _elf_symbols: Option<(&ElfFile<'a>, &'a [Entry32])>) {
 }
+
+/// Puts the CPU in a paused state until an interrupt wakes it up.
+pub fn wait_for_interrupt() {
+}
+
+/// The stub architecture reports no available memory regions.
+pub fn get_available_memory_regions() -> impl Iterator<Item = super::MemRegion> {
+    core::iter::empty::<super::MemRegion>()
+}
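Review note: `MemRegion` is made `pub` (with `pub` fields) above because `get_available_memory_regions` returns it from a `pub fn` that is re-exported at the crate root; with a private type this would be rejected as a private type in a public interface. The `wait_for_interrupt` pair also gives the scheduler a portable idle primitive. Purely as an illustration (no such loop is added by this patch), an idle path could use it like this:

    /// Sketch: the loop a scheduler could run when no thread is ready.
    fn idle_loop() -> ! {
        loop {
            // On i386 this lowers to `hlt`, sleeping the CPU until the next
            // IRQ; the stub implementation returns immediately.
            crate::arch::wait_for_interrupt();
        }
    }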
diff --git a/kernel/src/devices/pic.rs b/kernel/src/devices/pic.rs
index 4dbc2d0de..2509c8193 100644
--- a/kernel/src/devices/pic.rs
+++ b/kernel/src/devices/pic.rs
@@ -3,6 +3,7 @@
 //! Only handles the usual case of two PICs in a cascading setup, where the
 //! SLAVE is setup to cascade to the line 2 of the MASTER.
 
+#[cfg(target_arch = "x86")]
 use crate::arch::i386::pio::Pio;
 use crate::io::Io;
 use crate::sync::{Once, SpinLockIRQ};
@@ -36,16 +37,19 @@ const ICW4_8086: u8 = 0x01; /* 8086/88 (MCS-80/85) mode */
 //const icw4_sfnm = 0x10; /* Special fully nested (not) */
 
 /// The PIC manager.
-static PIC: Once<Pic> = Once::new();
+#[cfg(target_arch = "x86")]
+static PIC: Once<Pic<Pio<u8>>> = Once::new();
 
 /// Acquires a reference to the PIC, initializing it if it wasn't already setup.
-pub fn get() -> &'static Pic {
+#[cfg(target_arch = "x86")]
+pub fn get() -> &'static Pic<Pio<u8>> {
     PIC.call_once(|| unsafe {
         Pic::new()
     })
 }
 
 /// Initializes the PIC if it has not yet been initialized. Otherwise, does nothing.
+#[cfg(target_arch = "x86")]
 pub fn init() {
     PIC.call_once(|| unsafe {
         Pic::new()
@@ -54,23 +58,24 @@
 
 /// A single PIC8259 device.
 #[derive(Debug)]
-struct InternalPic {
+struct InternalPic<T> {
     /// The PIC's COMMAND IO port.
-    port_cmd: Pio<u8>,
+    port_cmd: T,
     /// The PIC's DATA IO port.
-    port_data: Pio<u8>
+    port_data: T
 }
 
 /// A master/slave PIC setup, as commonly found on IBM PCs.
 #[derive(Debug)]
-pub struct Pic {
+pub struct Pic<T> {
     /// The master PIC.
-    master: SpinLockIRQ<InternalPic>,
+    master: SpinLockIRQ<InternalPic<T>>,
     /// The slave PIC, cascaded on line 2 of `.master`
-    slave: SpinLockIRQ<InternalPic>,
+    slave: SpinLockIRQ<InternalPic<T>>,
 }
 
-impl Pic {
+#[cfg(target_arch = "x86")]
+impl Pic<Pio<u8>> {
     /// Creates a new PIC, and initializes it.
     ///
     /// Interrupts will be mapped to IRQ [32..48]
@@ -79,13 +84,15 @@
     ///
     /// This should only be called once! If called more than once, then both Pic instances
     /// will share the same underlying Pios, but different mutexes protecting them!
-    unsafe fn new() -> Pic {
+    unsafe fn new() -> Pic<Pio<u8>> {
         Pic {
             master: SpinLockIRQ::new(InternalPic::new(0x20, true, 32)),
             slave: SpinLockIRQ::new(InternalPic::new(0xA0, false, 32 + 8)),
         }
     }
+}
 
+impl<T: Io<Value = u8>> Pic<T> {
     /// Mask the given IRQ number. Will redirect the call to the right Pic device.
     pub fn mask(&self, irq: u8) {
         if irq < 8 {
@@ -106,7 +113,8 @@
     }
 }
 
-impl InternalPic {
+#[cfg(target_arch = "x86")]
+impl InternalPic<Pio<u8>> {
     /// Setup the 8259 pic. Redirect the IRQ to the chosen interrupt vector.
     ///
     /// # Safety
@@ -114,7 +122,7 @@
     /// The port should map to a proper PIC device. Sending invalid data to a
     /// random device can lead to memory unsafety. Furthermore, care should be
     /// taken not to share the underlying Pio.
-    unsafe fn new(port_base: u16, is_master: bool, vector_offset: u8) -> InternalPic {
+    unsafe fn new(port_base: u16, is_master: bool, vector_offset: u8) -> InternalPic<Pio<u8>> {
         let mut pic = InternalPic {
             port_cmd: Pio::new(port_base),
             port_data: Pio::new(port_base + 1)
@@ -140,7 +148,9 @@
         pic
     }
+}
 
+impl<T: Io<Value = u8>> InternalPic<T> {
     /// Acknowledges an IRQ, allowing the PIC to send a new IRQ on the next
     /// cycle.
     pub fn acknowledge(&mut self) {
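Review note: parameterizing the PIC over `T: Io<Value = u8>` is what allows the `Pio`-specific parts to be cfg-gated to x86, and it also makes the driver testable against a fake port. A rough sketch, assuming the `Io` trait in io.rs exposes `type Value`, `read` and `write` (check the actual trait before reusing this), and a hosted test build where `Vec` is available:

    use crate::io::Io;

    /// Sketch: a mock port that records every byte written, for use in
    /// hosted unit tests of the genericized drivers.
    struct MockPort(Vec<u8>);

    impl Io for MockPort {
        type Value = u8;
        fn read(&self) -> u8 { 0 }
        fn write(&mut self, value: u8) {
            // Tests could assert on the recorded ICW1..ICW4 init sequence.
            self.0.push(value);
        }
    }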
diff --git a/kernel/src/devices/pit.rs b/kernel/src/devices/pit.rs
index e4bbca3c1..ec6854f0a 100644
--- a/kernel/src/devices/pit.rs
+++ b/kernel/src/devices/pit.rs
@@ -54,6 +54,7 @@
 use crate::sync::SpinLock;
 use crate::io::Io;
+#[cfg(target_arch = "x86")]
 use crate::arch::i386::pio::Pio;
 use crate::event::{self, IRQEvent, Waitable};
 use crate::utils::div_ceil;
@@ -70,9 +71,10 @@ pub const CHAN_0_FREQUENCY: usize = 100;
 /// The channel 0 reset value
 const CHAN_0_DIVISOR: u16 = (OSCILLATOR_FREQ / CHAN_0_FREQUENCY) as u16;
 
+#[cfg(target_arch = "x86")]
 lazy_static! {
     /// The mutex wrapping the ports
-    static ref PIT_PORTS: SpinLock<PITPorts> = SpinLock::new(PITPorts {
+    static ref PIT_PORTS: SpinLock<PITPorts<Pio<u8>>> = SpinLock::new(PITPorts {
         /// Port 0x40, PIT's Channel 0.
         port_chan_0: Pio::new(0x40),
         /// Port 0x42, PIT's Channel 2.
@@ -111,14 +113,14 @@ bitflags! {
 
 /// We put the PIT ports in a structure to have them under a single mutex
 #[allow(clippy::missing_docs_in_private_items)]
-struct PITPorts {
-    port_chan_0: Pio<u8>,
-    port_chan_2: Pio<u8>,
-    port_cmd: Pio<u8>,
-    port_61: Pio<u8>
+struct PITPorts<T> {
+    port_chan_0: T,
+    port_chan_2: T,
+    port_cmd: T,
+    port_61: T
 }
 
-impl PITPorts {
+impl<T: Io<Value = u8>> PITPorts<T> {
     /// Writes a reload value in lobyte/hibyte access mode
     fn write_reload_value(&mut self, channel_selector: ChannelSelector, value: u16) {
         let port = match channel_selector {
@@ -133,15 +135,15 @@
 }
 
 /// Channel 2
-struct PITChannel2<'ports> {
+struct PITChannel2<'ports, T> {
     /// A reference to the PITPorts structure.
-    ports: &'ports mut PITPorts
+    ports: &'ports mut PITPorts<T>
 }
 
-impl<'ports> PITChannel2<'ports> {
+impl<'ports, T: Io<Value = u8>> PITChannel2<'ports, T> {
     /// Sets mode #0 for Channel 2.
-    fn init(ports: &mut PITPorts) -> PITChannel2<'_> {
+    fn init(ports: &mut PITPorts<T>) -> PITChannel2<'_, T> {
         ports.port_cmd.write(
             0b10110000 // channel 2, lobyte/hibyte, interrupt on terminal count
         );
@@ -194,13 +196,6 @@
     }
 }
 
-/// Spin waits for at least `ms` amount of milliseconds
-pub fn spin_wait_ms(ms: usize) {
-    let mut ports = PIT_PORTS.lock();
-    let mut chan2 = PITChannel2::init(&mut ports);
-    chan2.spin_wait_ms(ms);
-}
-
 /// A stream of events that triggers every `ms` amount of milliseconds, by counting Channel 0 interruptions.
 #[derive(Debug)]
 struct WaitFor {
@@ -242,6 +237,7 @@
 }
 
 /// Initialize the channel 0 to send recurring irqs.
+#[cfg(target_arch = "x86")]
 pub unsafe fn init_channel_0() {
     let mut ports = PIT_PORTS.lock();
     ports.port_cmd.write(
diff --git a/kernel/src/devices/rs232.rs b/kernel/src/devices/rs232.rs
index 9e03e5091..d2bcc30b3 100644
--- a/kernel/src/devices/rs232.rs
+++ b/kernel/src/devices/rs232.rs
@@ -3,6 +3,7 @@
 use core::fmt::{Display, Write, Error, Formatter};
 use crate::sync::{Once, SpinLock};
 use crate::io::Io;
+#[cfg(target_arch = "x86")]
 use crate::arch::i386::pio::Pio;
 
 /// The base IO port of a COM
@@ -98,6 +99,7 @@ impl Display for SerialAttributes {
 /// Initialized on first use.
 ///
 /// Log functions will access the [SerialInternal] it wraps, and send text to it.
+#[cfg(target_arch = "x86")]
 static G_SERIAL: Once<SpinLock<SerialInternal<Pio<u8>>>> = Once::new();
 
 /// A COM output. Wraps the IO ports of this COM, and provides function for writing to it.
@@ -108,9 +110,10 @@ struct SerialInternal<T> {
     status_port: T
 }
 
+#[cfg(target_arch = "x86")]
 impl SerialInternal<Pio<u8>> {
     /// Creates a COM port from its base IO address.
-    #[cfg(all(target_arch="x86", not(test)))]
+    #[cfg(not(test))]
     #[allow(unused)]
     pub fn new(com_port: ComPort) -> SerialInternal<Pio<u8>> {
         let mut data_port = Pio::<u8>::new(com_port.0 + 0);
@@ -137,6 +140,9 @@
     #[cfg(test)]
     pub fn new(_com_port: ComPort) -> SerialInternal<Pio<u8>> { panic!("mock implementation !") }
+}
+
+impl<T: Io<Value = u8>> SerialInternal<T> {
 
     /// Outputs a string to this COM.
     fn send_string(&mut self, string: &str) {
         for byte in string.bytes() {
@@ -161,6 +167,7 @@
 #[derive(Debug)]
 pub struct SerialLogger;
 
+#[cfg(target_arch = "x86")]
 impl SerialLogger {
     /// Re-take the lock protecting multiple access to the device.
     ///
@@ -172,6 +179,7 @@
 }
 
+#[cfg(target_arch = "x86")]
 impl Write for SerialLogger {
     /// Writes a string to COM1.
     fn write_str(&mut self, s: &str) -> Result<(), ::core::fmt::Error> {
diff --git a/kernel/src/heap_allocator.rs b/kernel/src/heap_allocator.rs
index 91db5f01a..30fc23266 100644
--- a/kernel/src/heap_allocator.rs
+++ b/kernel/src/heap_allocator.rs
@@ -47,15 +47,20 @@ impl Allocator {
 
     /// Create a new Heap of `RESERVED_HEAP_SIZE` bytes.
     fn init() -> SpinLock<Heap> {
+        info!("Getting kernel memory");
         let mut active_pages = get_kernel_memory();
         // Reserve 512MB of virtual memory for heap space. Don't actually allocate it.
+        info!("Reserving heap");
         let heap_space = active_pages.find_virtual_space(RESERVED_HEAP_SIZE)
             .expect("Kernel should have 512MB of virtual memory");
         // map only the first page
+        info!("Allocating a frame");
         let frame = FrameAllocator::allocate_frame()
             .expect("Cannot allocate first frame of heap");
+        info!("Mapping allocated frame {:?}", frame);
         active_pages.map_phys_region_to(frame, heap_space, MappingAccessRights::k_rw());
         // guard the rest
+        info!("Guarding the rest");
         active_pages.guard(heap_space + PAGE_SIZE, RESERVED_HEAP_SIZE - PAGE_SIZE);
         info!("Reserving {} pages at {:#010x}", RESERVED_HEAP_SIZE / PAGE_SIZE - 1, heap_space.addr() + PAGE_SIZE);
         unsafe {
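Review note on the pit.rs hunks above: `write_reload_value` (body unchanged, elided by the diff) relies on the lobyte/hibyte access mode selected through the command register, meaning the 16-bit reload value crosses the 8-bit data port as two consecutive writes, low byte first. A minimal sketch of that protocol under the same `Io<Value = u8>` bound used above:

    /// Sketch: how a 16-bit PIT reload value travels over an 8-bit port in
    /// lobyte/hibyte access mode.
    fn write_u16_lobyte_hibyte<T: Io<Value = u8>>(port: &mut T, value: u16) {
        port.write((value & 0xFF) as u8); // low byte first
        port.write((value >> 8) as u8);   // then the high byte
    }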
diff --git a/kernel/src/log_impl/mod.rs b/kernel/src/log_impl/mod.rs
index 66abc7782..5fd0c5a4e 100644
--- a/kernel/src/log_impl/mod.rs
+++ b/kernel/src/log_impl/mod.rs
@@ -37,7 +37,7 @@ static LOGGER: Once<Logger> = Once::new();
 /// Initializes the Logger in a heapless environment.
 pub fn early_init() {
     let filter = filter::Builder::new()
-        .filter(None, LevelFilter::Info)
+        .filter(None, LevelFilter::Trace)
         .build();
     log::set_logger(LOGGER.call_once(|| Logger { filter: RwLock::new(filter) } ))
         .expect("log_impl::init to be called before logger is initialized");
diff --git a/kernel/src/main.rs b/kernel/src/main.rs
index 6cf8e2603..293d95d18 100644
--- a/kernel/src/main.rs
+++ b/kernel/src/main.rs
@@ -241,7 +241,9 @@ unsafe fn do_panic(msg: core::fmt::Arguments<'_>, stackdump_source: Option