From 3263ba31b2050fab933000a6c611ca6d0f605d91 Mon Sep 17 00:00:00 2001 From: "Cliff L. Biffle" Date: Mon, 18 Nov 2024 08:25:20 -0700 Subject: [PATCH] Allow kernel to be built with stable toolchain. The only nightly feature still used in the kernel is `#[naked]` on functions. This can be replaced with careful use of `global_asm!` to create functions in pure assembly language. With this change, the kernel builds and works on stable 1.82.0 using my out-of-tree build system. --- build/kernel-link.x | 6 +- sys/kern/src/arch/arm_m.rs | 730 ++++++++++++++++++------------------- sys/kern/src/lib.rs | 1 - 3 files changed, 354 insertions(+), 383 deletions(-) diff --git a/build/kernel-link.x b/build/kernel-link.x index 2d9d450a8..6585a54d5 100644 --- a/build/kernel-link.x +++ b/build/kernel-link.x @@ -91,12 +91,14 @@ SECTIONS *(.PreResetTrampoline); *(.Reset); - *(.text .text.*); - /* The HardFaultTrampoline uses the `b` instruction to enter `HardFault`, so must be placed close to it. */ *(.HardFaultTrampoline); *(.HardFault.*); + *(.text.HardFault.*); + + *(.text .text.*); + . = ALIGN(4); __etext = .; } > FLASH diff --git a/sys/kern/src/arch/arm_m.rs b/sys/kern/src/arch/arm_m.rs index 562a45fc9..37b9dbca5 100644 --- a/sys/kern/src/arch/arm_m.rs +++ b/sys/kern/src/arch/arm_m.rs @@ -70,7 +70,7 @@ //! context switches, and just always do full save/restore, eliminating PendSV. //! We'll see. -use core::arch; +use core::arch::{self, global_asm}; use core::sync::atomic::{AtomicBool, AtomicPtr, AtomicU32, Ordering}; use zerocopy::FromBytes; @@ -723,12 +723,9 @@ pub fn start_first_task(tick_divisor: u32, task: &mut task::Task) -> ! { } } -/// Handler that gets linked into the vector table for the Supervisor Call (SVC) -/// instruction. (Name is dictated by the `cortex_m` crate.) -#[allow(non_snake_case)] -#[naked] -#[no_mangle] -pub unsafe extern "C" fn SVCall() { +// Handler that gets linked into the vector table for the Supervisor Call (SVC) +// instruction. (Name is dictated by the `cortex_m` crate.) +cfg_if::cfg_if! { // TODO: could shave several cycles off SVC entry with more careful ordering // of instructions below, though the precise details depend on how complex // of an M-series processor you're targeting -- so I've punted on this for @@ -748,144 +745,146 @@ pub unsafe extern "C" fn SVCall() { // // After that, we repeat the same steps in the opposite order to restore // task context (possibly for a different task!). - unsafe { - cfg_if::cfg_if! { - if #[cfg(armv6m)] { - arch::asm!(" - @ Inspect LR to figure out the caller's mode. - mov r0, lr - ldr r1, =0xFFFFFFF3 - bics r0, r0, r1 - @ Is the call coming from thread mode + main stack, i.e. - @ from the kernel startup routine? - cmp r0, #0x8 - @ If so, this is startup; jump ahead. The common case falls - @ through because branch-not-taken tends to be faster on small - @ cores. - beq 1f - - @ store volatile state. - @ first, get a pointer to the current task. - ldr r0, =CURRENT_TASK_PTR - ldr r1, [r0] - @ now, store volatile registers, plus the PSP, plus LR. - movs r2, r1 - stm r2!, {{r4-r7}} - mov r4, r8 - mov r5, r9 - mov r6, r10 - mov r7, r11 - stm r2!, {{r4-r7}} - mrs r4, PSP - mov r5, lr - stm r2!, {{r4, r5}} - - @ syscall number is passed in r11. Move it into r0 to pass - @ it as an argument to the handler, then call the handler. - mov r0, r11 - bl syscall_entry - - @ we're returning back to *some* task, maybe not the same one. - ldr r0, =CURRENT_TASK_PTR - ldr r0, [r0] - @ restore volatile registers, plus PSP. 
We will do this in - @ slightly reversed order for efficiency. First, do the high - @ ones. - movs r1, r0 - adds r1, r1, #(4 * 4) - ldm r1!, {{r4-r7}} - mov r11, r7 - mov r10, r6 - mov r9, r5 - mov r8, r4 - ldm r1!, {{r4, r5}} - msr PSP, r4 - mov lr, r5 - - @ Now that we no longer need r4-r7 as temporary registers, - @ restore them too. - ldm r0!, {{r4-r7}} - - @ resume - bx lr - - 1: @ starting up the first task. - @ Drop privilege in Thread mode. - movs r0, #1 - msr CONTROL, r0 - @ note: no barrier here because exc return serves as barrier - - @ Manufacture a new EXC_RETURN to change the processor mode - @ when we return. - ldr r0, ={exc_return} - mov lr, r0 - bx lr @ branch into user mode - ", - exc_return = const EXC_RETURN_CONST, - options(noreturn), - ) - } else if #[cfg(any(armv7m, armv8m))] { - arch::asm!(" - @ Inspect LR to figure out the caller's mode. - mov r0, lr - mov r1, #0xFFFFFFF3 - bic r0, r1 - @ Is the call coming from thread mode + main stack, i.e. - @ from the kernel startup routine? - cmp r0, #0x8 - @ If so, this is startup; jump ahead. The common case falls - @ through because branch-not-taken tends to be faster on small - @ cores. - beq 1f - - @ store volatile state. - @ first, get a pointer to the current task. - movw r0, #:lower16:CURRENT_TASK_PTR - movt r0, #:upper16:CURRENT_TASK_PTR - ldr r1, [r0] - movs r2, r1 - @ fetch the process-mode stack pointer. - @ fetching into r12 means the order in the stm below is right. - mrs r12, PSP - @ now, store volatile registers, plus the PSP in r12, plus LR. - stm r2!, {{r4-r12, lr}} - vstm r2, {{s16-s31}} - - @ syscall number is passed in r11. Move it into r0 to pass it as - @ an argument to the handler, then call the handler. - movs r0, r11 - bl syscall_entry - - @ we're returning back to *some* task, maybe not the same one. - movw r0, #:lower16:CURRENT_TASK_PTR - movt r0, #:upper16:CURRENT_TASK_PTR - ldr r0, [r0] - @ restore volatile registers, plus load PSP into r12 - ldm r0!, {{r4-r12, lr}} - vldm r0, {{s16-s31}} - msr PSP, r12 - - @ resume - bx lr - - 1: @ starting up the first task. - movs r0, #1 @ get bitmask to... - msr CONTROL, r0 @ ...shed privs from thread mode. - @ note: now barrier here because exc return - @ serves as barrier - - mov lr, {exc_return} @ materialize EXC_RETURN value to - @ return into thread mode, PSP, FP on - - bx lr @ branch into user mode - ", - exc_return = const EXC_RETURN_CONST, - options(noreturn), - ) - } else { - compile_error!("missing SVCall impl for ARM profile."); - } + if #[cfg(armv6m)] { + global_asm!{" + .section .text.SVCall + .globl SVCall + .type SVCall,function + SVCall: + @ Inspect LR to figure out the caller's mode. + mov r0, lr + ldr r1, =0xFFFFFFF3 + bics r0, r0, r1 + @ Is the call coming from thread mode + main stack, i.e. + @ from the kernel startup routine? + cmp r0, #0x8 + @ If so, this is startup; jump ahead. The common case falls + @ through because branch-not-taken tends to be faster on small + @ cores. + beq 1f + + @ store volatile state. + @ first, get a pointer to the current task. + ldr r0, =CURRENT_TASK_PTR + ldr r1, [r0] + @ now, store volatile registers, plus the PSP, plus LR. + movs r2, r1 + stm r2!, {{r4-r7}} + mov r4, r8 + mov r5, r9 + mov r6, r10 + mov r7, r11 + stm r2!, {{r4-r7}} + mrs r4, PSP + mov r5, lr + stm r2!, {{r4, r5}} + + @ syscall number is passed in r11. Move it into r0 to pass + @ it as an argument to the handler, then call the handler. + mov r0, r11 + bl syscall_entry + + @ we're returning back to *some* task, maybe not the same one. 
+            ldr r0, =CURRENT_TASK_PTR
+            ldr r0, [r0]
+            @ restore volatile registers, plus PSP. We will do this in
+            @ slightly reversed order for efficiency. First, do the high
+            @ ones.
+            movs r1, r0
+            adds r1, r1, #(4 * 4)
+            ldm r1!, {{r4-r7}}
+            mov r11, r7
+            mov r10, r6
+            mov r9, r5
+            mov r8, r4
+            ldm r1!, {{r4, r5}}
+            msr PSP, r4
+            mov lr, r5
+
+            @ Now that we no longer need r4-r7 as temporary registers,
+            @ restore them too.
+            ldm r0!, {{r4-r7}}
+
+            @ resume
+            bx lr
+
+        1: @ starting up the first task.
+            @ Drop privilege in Thread mode.
+            movs r0, #1
+            msr CONTROL, r0
+            @ note: no barrier here because exc return serves as barrier
+
+            @ Manufacture a new EXC_RETURN to change the processor mode
+            @ when we return.
+            ldr r0, ={exc_return}
+            mov lr, r0
+            bx lr @ branch into user mode
+            ",
+            exc_return = const EXC_RETURN_CONST,
+        }
+    } else if #[cfg(any(armv7m, armv8m))] {
+        global_asm!{"
+            .section .text.SVCall
+            .globl SVCall
+            .type SVCall,function
+        SVCall:
+            @ Inspect LR to figure out the caller's mode.
+            mov r0, lr
+            mov r1, #0xFFFFFFF3
+            bic r0, r1
+            @ Is the call coming from thread mode + main stack, i.e.
+            @ from the kernel startup routine?
+            cmp r0, #0x8
+            @ If so, this is startup; jump ahead. The common case falls
+            @ through because branch-not-taken tends to be faster on small
+            @ cores.
+            beq 1f
+
+            @ store volatile state.
+            @ first, get a pointer to the current task.
+            movw r0, #:lower16:CURRENT_TASK_PTR
+            movt r0, #:upper16:CURRENT_TASK_PTR
+            ldr r1, [r0]
+            movs r2, r1
+            @ fetch the process-mode stack pointer.
+            @ fetching into r12 means the order in the stm below is right.
+            mrs r12, PSP
+            @ now, store volatile registers, plus the PSP in r12, plus LR.
+            stm r2!, {{r4-r12, lr}}
+            vstm r2, {{s16-s31}}
+
+            @ syscall number is passed in r11. Move it into r0 to pass it as
+            @ an argument to the handler, then call the handler.
+            movs r0, r11
+            bl syscall_entry
+
+            @ we're returning back to *some* task, maybe not the same one.
+            movw r0, #:lower16:CURRENT_TASK_PTR
+            movt r0, #:upper16:CURRENT_TASK_PTR
+            ldr r0, [r0]
+            @ restore volatile registers, plus load PSP into r12
+            ldm r0!, {{r4-r12, lr}}
+            vldm r0, {{s16-s31}}
+            msr PSP, r12
+
+            @ resume
+            bx lr
+
+        1: @ starting up the first task.
+            movs r0, #1          @ get bitmask to...
+            msr CONTROL, r0      @ ...shed privs from thread mode.
+                                 @ note: no barrier here because exc return
+                                 @ serves as barrier
+
+            mov lr, {exc_return} @ materialize EXC_RETURN value to
+                                 @ return into thread mode, PSP, FP on
+
+            bx lr @ branch into user mode
+            ",
+            exc_return = const EXC_RETURN_CONST,
+        }
+    } else {
+        compile_error!("missing SVCall impl for ARM profile.");
+    }
+}
@@ -972,93 +971,90 @@ fn pend_context_switch_from_isr() {
     cortex_m::peripheral::SCB::set_pendsv();
 }
 
-#[allow(non_snake_case)]
-#[naked]
-#[no_mangle]
-pub unsafe extern "C" fn PendSV() {
-    unsafe {
-        cfg_if::cfg_if! {
-            if #[cfg(armv6m)] {
-                arch::asm!(
-                    "
-                    @ store volatile state.
-                    @ first, get a pointer to the current task.
-                    ldr r0, =CURRENT_TASK_PTR
-                    ldr r1, [r0]
-                    @ now, store volatile registers, plus the PSP, plus LR.
-                    stm r1!, {{r4-r7}}
-                    mov r4, r8
-                    mov r5, r9
-                    mov r6, r10
-                    mov r7, r11
-                    stm r1!, {{r4-r7}}
-                    mrs r4, PSP
-                    mov r5, lr
-                    stm r1!, {{r4, r5}}
-
-                    bl pendsv_entry
-
-                    @ we're returning back to *some* task, maybe not the same one.
-                    ldr r0, =CURRENT_TASK_PTR
-                    ldr r0, [r0]
-                    @ restore volatile registers, plus PSP. We will do this in
-                    @ slightly reversed order for efficiency. First, do the high
-                    @ ones.
- movs r1, r0 - adds r1, r1, #(4 * 4) - ldm r1!, {{r4-r7}} - mov r11, r7 - mov r10, r6 - mov r9, r5 - mov r8, r4 - ldm r1!, {{r4, r5}} - msr PSP, r4 - mov lr, r5 - - @ Now that we no longer need r4-r7 as temporary registers, - @ restore them too. - ldm r0!, {{r4-r7}} - - @ resume - bx lr - ", - options(noreturn), - ); - } else if #[cfg(any(armv7m, armv8m))] { - arch::asm!( - " - @ store volatile state. - @ first, get a pointer to the current task. - movw r0, #:lower16:CURRENT_TASK_PTR - movt r0, #:upper16:CURRENT_TASK_PTR - ldr r1, [r0] - @ fetch the process-mode stack pointer. - @ fetching into r12 means the order in the stm below is right. - mrs r12, PSP - @ now, store volatile registers, plus the PSP in r12, plus LR. - stm r1!, {{r4-r12, lr}} - vstm r1, {{s16-s31}} - - bl pendsv_entry - - @ we're returning back to *some* task, maybe not the same one. - movw r0, #:lower16:CURRENT_TASK_PTR - movt r0, #:upper16:CURRENT_TASK_PTR - ldr r0, [r0] - @ restore volatile registers, plus load PSP into r12 - ldm r0!, {{r4-r12, lr}} - vldm r0, {{s16-s31}} - msr PSP, r12 - - @ resume - bx lr - ", - options(noreturn), - ); - } else { - compile_error!("missing PendSV impl for ARM profile."); - } +cfg_if::cfg_if! { + if #[cfg(armv6m)] { + global_asm!{" + .section .text.PendSV + .globl PendSV + .type PendSV,function + PendSV: + @ store volatile state. + @ first, get a pointer to the current task. + ldr r0, =CURRENT_TASK_PTR + ldr r1, [r0] + @ now, store volatile registers, plus the PSP, plus LR. + stm r1!, {{r4-r7}} + mov r4, r8 + mov r5, r9 + mov r6, r10 + mov r7, r11 + stm r1!, {{r4-r7}} + mrs r4, PSP + mov r5, lr + stm r1!, {{r4, r5}} + + bl pendsv_entry + + @ we're returning back to *some* task, maybe not the same one. + ldr r0, =CURRENT_TASK_PTR + ldr r0, [r0] + @ restore volatile registers, plus PSP. We will do this in + @ slightly reversed order for efficiency. First, do the high + @ ones. + movs r1, r0 + adds r1, r1, #(4 * 4) + ldm r1!, {{r4-r7}} + mov r11, r7 + mov r10, r6 + mov r9, r5 + mov r8, r4 + ldm r1!, {{r4, r5}} + msr PSP, r4 + mov lr, r5 + + @ Now that we no longer need r4-r7 as temporary registers, + @ restore them too. + ldm r0!, {{r4-r7}} + + @ resume + bx lr + ", + } + } else if #[cfg(any(armv7m, armv8m))] { + global_asm!{" + .section .text.PendSV + .globl PendSV + .type PendSV,function + PendSV: + @ store volatile state. + @ first, get a pointer to the current task. + movw r0, #:lower16:CURRENT_TASK_PTR + movt r0, #:upper16:CURRENT_TASK_PTR + ldr r1, [r0] + @ fetch the process-mode stack pointer. + @ fetching into r12 means the order in the stm below is right. + mrs r12, PSP + @ now, store volatile registers, plus the PSP in r12, plus LR. + stm r1!, {{r4-r12, lr}} + vstm r1, {{s16-s31}} + + bl pendsv_entry + + @ we're returning back to *some* task, maybe not the same one. + movw r0, #:lower16:CURRENT_TASK_PTR + movt r0, #:upper16:CURRENT_TASK_PTR + ldr r0, [r0] + @ restore volatile registers, plus load PSP into r12 + ldm r0!, {{r4-r12, lr}} + vldm r0, {{s16-s31}} + msr PSP, r12 + + @ resume + bx lr + ", } + } else { + compile_error!("missing PendSV impl for ARM profile."); } } @@ -1207,160 +1203,134 @@ enum FaultType { UsageFault = 6, } -#[naked] -#[cfg(any(armv7m, armv8m))] -unsafe extern "C" fn configurable_fault() { - unsafe { - arch::asm!( - " - @ Read the current task pointer. - movw r0, #:lower16:CURRENT_TASK_PTR - movt r0, #:upper16:CURRENT_TASK_PTR - ldr r0, [r0] - mrs r12, PSP - - @ Now, to aid those who will debug what induced this fault, save our - @ context. 
Some of our context (namely, r0-r3, r12, LR, the return - @ address and the xPSR) is already on our stack as part of the fault; - @ we'll store our remaining registers, plus the PSP (now in r12), plus - @ exc_return (now in LR) into the save region in the current task. - @ Note that we explicitly refrain from saving the floating point - @ registers here: touching the floating point registers will induce - @ a lazy save on the stack, which is clearly bad news if we have - @ overflowed our stack! We do want to ultimately save them to aid - @ debuggability, however, so we pass the address to which they should - @ be saved to our fault handler, which will take the necessary - @ measures to save them safely. Finally, note that deferring the - @ save to later in handle_fault assumes that the floating point - @ registers are not in fact touched before determmining the fault type - @ and disabling lazy saving accordingly; should that assumption not - @ hold, we will need to be (ironically?) less lazy about disabling - @ lazy saving... - mov r2, r0 - stm r2!, {{r4-r12, lr}} - - @ Pull our fault number out of IPSR, allowing for program text to be - @ shared across all configurable faults. (Note that the exception - @ number is the bottom 9 bits, but we need only look at the bottom 4 - @ bits as this handler is only used for exceptions with numbers less - @ than 16.) - mrs r1, IPSR - and r1, r1, #0xf - bl handle_fault - - @ Our task has changed; reload it. - movw r0, #:lower16:CURRENT_TASK_PTR - movt r0, #:upper16:CURRENT_TASK_PTR - ldr r0, [r0] - - @ Restore volatile registers, plus load PSP into r12 - ldm r0!, {{r4-r12, lr}} - vldm r0, {{s16-s31}} - msr PSP, r12 - - @ resume - bx lr - ", - options(noreturn), - ); - } -} - -/// Initial entry point for handling a memory management fault. -#[allow(non_snake_case)] -#[no_mangle] -#[naked] -#[cfg(any(armv7m, armv8m))] -pub unsafe extern "C" fn MemoryManagement() { - // Safety: this is merely a call (a tailcall, really) to a different handler - // -- we're doing it this way simply because the other handler does context - // save, so we can't go up into Rust here. - unsafe { arch::asm!("b {0}", sym configurable_fault, options(noreturn)) } -} - -/// Initial entry point for handling a bus fault. -#[allow(non_snake_case)] -#[no_mangle] -#[naked] -#[cfg(any(armv7m, armv8m))] -pub unsafe extern "C" fn BusFault() { - // Safety: this is merely a call (a tailcall, really) to a different handler - // -- we're doing it this way simply because the other handler does context - // save, so we can't go up into Rust here. - unsafe { arch::asm!("b {0}", sym configurable_fault, options(noreturn)) } -} - -/// Initial entry point for handling a usage fault. -#[allow(non_snake_case)] -#[no_mangle] -#[naked] #[cfg(any(armv7m, armv8m))] -pub unsafe extern "C" fn UsageFault() { - // Safety: this is merely a call (a tailcall, really) to a different handler - // -- we're doing it this way simply because the other handler does context - // save, so we can't go up into Rust here. - unsafe { arch::asm!("b {0}", sym configurable_fault, options(noreturn)) } +global_asm! {" + .section .text.configurable_fault + .globl configurable_fault + .type configurable_fault,function + .cpu cortex-m4 @ least common denominator we support + configurable_fault: + @ Read the current task pointer. + movw r0, #:lower16:CURRENT_TASK_PTR + movt r0, #:upper16:CURRENT_TASK_PTR + ldr r0, [r0] + mrs r12, PSP + + @ Now, to aid those who will debug what induced this fault, save our + @ context. 
Some of our context (namely, r0-r3, r12, LR, the return
+    @ address and the xPSR) is already on our stack as part of the fault;
+    @ we'll store our remaining registers, plus the PSP (now in r12), plus
+    @ exc_return (now in LR) into the save region in the current task.
+    @ Note that we explicitly refrain from saving the floating point
+    @ registers here: touching the floating point registers will induce
+    @ a lazy save on the stack, which is clearly bad news if we have
+    @ overflowed our stack! We do want to ultimately save them to aid
+    @ debuggability, however, so we pass the address to which they should
+    @ be saved to our fault handler, which will take the necessary
+    @ measures to save them safely. Finally, note that deferring the
+    @ save to later in handle_fault assumes that the floating point
+    @ registers are not in fact touched before determining the fault type
+    @ and disabling lazy saving accordingly; should that assumption not
+    @ hold, we will need to be (ironically?) less lazy about disabling
+    @ lazy saving...
+    mov r2, r0
+    stm r2!, {{r4-r12, lr}}
+
+    @ Pull our fault number out of IPSR, allowing for program text to be
+    @ shared across all configurable faults. (Note that the exception
+    @ number is the bottom 9 bits, but we need only look at the bottom 4
+    @ bits as this handler is only used for exceptions with numbers less
+    @ than 16.)
+    mrs r1, IPSR
+    and r1, r1, #0xf
+    bl handle_fault
+
+    @ Our task has changed; reload it.
+    movw r0, #:lower16:CURRENT_TASK_PTR
+    movt r0, #:upper16:CURRENT_TASK_PTR
+    ldr r0, [r0]
+
+    @ Restore volatile registers, plus load PSP into r12
+    ldm r0!, {{r4-r12, lr}}
+    vldm r0, {{s16-s31}}
+    msr PSP, r12
+
+    @ resume
+    bx lr
+
+    .section .text.MemoryManagement
+    .globl MemoryManagement
+    .type MemoryManagement,function
+MemoryManagement:
+    b configurable_fault
+
+    .section .text.BusFault
+    .globl BusFault
+    .type BusFault,function
+BusFault:
+    b configurable_fault
+
+    .section .text.UsageFault
+    .globl UsageFault
+    .type UsageFault,function
+UsageFault:
+    b configurable_fault
+    ",
+}
 
-/// Initial entry point for handling a hard fault (ARMv6).
-#[allow(non_snake_case)]
-#[no_mangle]
-#[naked]
 #[cfg(armv6m)]
-pub unsafe extern "C" fn HardFault() {
-    unsafe {
-        arch::asm!(
-            "
-            @ Read the current task pointer.
-            ldr r0, =CURRENT_TASK_PTR
-            ldr r0, [r0]
-            mrs r12, PSP
-
-            @ Now, to aid those who will debug what induced this fault, save our
-            @ context. Some of our context (namely, r0-r3, r12, LR, the return
-            @ address and the xPSR) is already on our stack as part of the fault;
-            @ we'll store our remaining registers, plus the PSP, plus exc_return
-            @ (now in LR) into the save region in the current task.
-            mov r2, r0
-            stm r2!, {{r4-r7}}
-            mov r4, r8
-            mov r5, r9
-            mov r6, r10
-            mov r7, r11
-            stm r2!, {{r4-r7}}
-            mrs r4, PSP
-            mov r5, lr
-            stm r2!, {{r4, r5}}
-
-            bl handle_fault
-
-            @ Our task has changed; reload it.
-            ldr r0, =CURRENT_TASK_PTR
-            ldr r0, [r0]
-            @ restore volatile registers, plus PSP. We will do this in
-            @ slightly reversed order for efficiency. First, do the high
-            @ ones.
-            movs r1, r0
-            adds r1, r1, #(4 * 4)
-            ldm r1!, {{r4-r7}}
-            mov r11, r7
-            mov r10, r6
-            mov r9, r5
-            mov r8, r4
-            ldm r1!, {{r4, r5}}
-            msr PSP, r4
-            mov lr, r5
-
-            @ Now that we no longer need r4-r7 as temporary registers,
-            @ restore them too.
-            ldm r0!, {{r4-r7}}
-
-            @ resume
-            bx lr
-            ",
-            options(noreturn),
-        );
-    }
-}
+global_asm! {"
+    .section .text.HardFault
+    .globl HardFault
+    .type HardFault,function
+HardFault:
+    @ Read the current task pointer.
+    ldr r0, =CURRENT_TASK_PTR
+    ldr r0, [r0]
+    mrs r12, PSP
+
+    @ Now, to aid those who will debug what induced this fault, save our
+    @ context. Some of our context (namely, r0-r3, r12, LR, the return
+    @ address and the xPSR) is already on our stack as part of the fault;
+    @ we'll store our remaining registers, plus the PSP, plus exc_return
+    @ (now in LR) into the save region in the current task.
+    mov r2, r0
+    stm r2!, {{r4-r7}}
+    mov r4, r8
+    mov r5, r9
+    mov r6, r10
+    mov r7, r11
+    stm r2!, {{r4-r7}}
+    mrs r4, PSP
+    mov r5, lr
+    stm r2!, {{r4, r5}}
+
+    bl handle_fault
+
+    @ Our task has changed; reload it.
+    ldr r0, =CURRENT_TASK_PTR
+    ldr r0, [r0]
+    @ restore volatile registers, plus PSP. We will do this in
+    @ slightly reversed order for efficiency. First, do the high
+    @ ones.
+    movs r1, r0
+    adds r1, r1, #(4 * 4)
+    ldm r1!, {{r4-r7}}
+    mov r11, r7
+    mov r10, r6
+    mov r9, r5
+    mov r8, r4
+    ldm r1!, {{r4, r5}}
+    msr PSP, r4
+    mov lr, r5
+
+    @ Now that we no longer need r4-r7 as temporary registers,
+    @ restore them too.
+    ldm r0!, {{r4-r7}}
+
+    @ resume
+    bx lr
+    ",
+}
 
 bitflags::bitflags! {
diff --git a/sys/kern/src/lib.rs b/sys/kern/src/lib.rs
index e6558756a..e94e0c4ef 100644
--- a/sys/kern/src/lib.rs
+++ b/sys/kern/src/lib.rs
@@ -27,7 +27,6 @@
 //! most clever algorithms used in kernels wind up requiring `unsafe`.)
 
 #![cfg_attr(target_os = "none", no_std)]
-#![feature(naked_functions)]
 // Require an unsafe block even in an unsafe fn, because unsafe fns are about
 // contract, not implementation.
 #![forbid(unsafe_op_in_unsafe_fn)]
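
The conversion applied to each handler above follows a single shape. In
miniature, with a hypothetical `Handler` standing in for SVCall, PendSV,
and the fault entry points (the one-instruction body is illustrative,
not code from the kernel):

    // Before: needs nightly #![feature(naked_functions)]. The asm!
    // string is the entire function; rustc emits no prologue/epilogue.
    #[allow(non_snake_case)]
    #[naked]
    #[no_mangle]
    pub unsafe extern "C" fn Handler() {
        unsafe { core::arch::asm!("bx lr", options(noreturn)) }
    }

    // After: the whole function is emitted from global_asm!, which is
    // stable. The directives reproduce what rustc previously did
    // implicitly: a dedicated .text.Handler section (so the linker
    // script can still place it, as kernel-link.x does for HardFault
    // above), external linkage, and a function-type symbol, which on
    // these Thumb-only targets keeps the Thumb bit set in the address
    // that lands in the vector table.
    core::arch::global_asm! {"
        .section .text.Handler
        .globl Handler
        .type Handler,function
    Handler:
        bx lr
    "}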