diff --git a/arch/arch.c b/arch/arch.c
index 23dac12e9..bb0278b94 100644
--- a/arch/arch.c
+++ b/arch/arch.c
@@ -1 +1,13 @@
-// empty file to help build empty arch module
+/*
+ * Copyright (c) 2020 Travis Geiselbrecht
+ *
+ * Use of this source code is governed by a MIT-style
+ * license that can be found in the LICENSE file or at
+ * https://opensource.org/licenses/MIT
+ */
+
+#include <arch/ops.h>
+
+__WEAK bool arch_in_int_handler(void) {
+    return false;
+}
diff --git a/arch/arm/arm/arch.c b/arch/arm/arm/arch.c
index 093622587..c6fdb5bd0 100644
--- a/arch/arm/arm/arch.c
+++ b/arch/arm/arm/arch.c
@@ -47,6 +47,32 @@ static void arm_basic_setup(void);
 static void spinlock_test(void);
 static void spinlock_test_secondary(void);
 
+static uint32_t cpu_in_irq_ctxt[SMP_MAX_CPUS];
+
+enum handler_return arch_irq(struct arm_iframe *frame) {
+    enum handler_return ret;
+    uint32_t cpu = arch_curr_cpu_num();
+    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);
+
+    cpu_in_irq_ctxt[cpu] = 1;
+    ret = platform_irq(frame);
+    cpu_in_irq_ctxt[cpu] = 0;
+    return ret;
+}
+
+bool arch_in_int_handler(void) {
+#if ARM_ISA_ARMV7M
+    uint32_t ipsr;
+    __asm volatile ("MRS %0, ipsr" : "=r" (ipsr) );
+    return (ipsr & IPSR_ISR_Msk);
+#else
+    uint32_t cpu = arch_curr_cpu_num();
+    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);
+
+    return (cpu_in_irq_ctxt[cpu] == 1);
+#endif
+}
+
 #if WITH_SMP
 /* smp boot lock */
 spin_lock_t arm_boot_cpu_lock = 1;
diff --git a/arch/arm/arm/exceptions.S b/arch/arm/arm/exceptions.S
index 74ee3a7c3..cb3221898 100644
--- a/arch/arm/arm/exceptions.S
+++ b/arch/arm/arm/exceptions.S
@@ -204,18 +204,8 @@ FUNCTION(arm_irq)
 
     /* r0 now holds pointer to iframe */
 
-    /* track that we're inside an irq handler */
-    LOADCONST(r2, __arm_in_handler)
-    mov r1, #1
-    str r1, [r2]
-
     /* call into higher level code */
-    bl platform_irq
-
-    /* clear the irq handler status */
-    LOADCONST(r1, __arm_in_handler)
-    mov r2, #0
-    str r2, [r1]
+    bl arch_irq
 
     /* reschedule if the handler returns nonzero */
     cmp r0, #0
@@ -237,7 +227,3 @@ FUNCTION(arm_fiq)
 DATA(__irq_cycle_count)
     .word 0
 #endif
-
-.data
-DATA(__arm_in_handler)
-    .word 0
diff --git a/arch/arm/include/arch/arch_ops.h b/arch/arm/include/arch/arch_ops.h
index 87e61f1d8..53cbf7f73 100644
--- a/arch/arm/include/arch/arch_ops.h
+++ b/arch/arm/include/arch/arch_ops.h
@@ -20,6 +20,8 @@
 
 __BEGIN_CDECLS
 
+enum handler_return platform_irq(struct arm_iframe *frame);
+
 #if ARM_ISA_ARMV7 || (ARM_ISA_ARMV6 && !__thumb__)
 #define ENABLE_CYCLE_COUNTER 1
 
@@ -67,19 +69,6 @@ static inline bool arch_fiqs_disabled(void) {
     return !!state;
 }
 
-static inline bool arch_in_int_handler(void) {
-#if ARM_ISA_ARMV7M
-    uint32_t ipsr;
-    __asm volatile ("MRS %0, ipsr" : "=r" (ipsr) );
-    return (ipsr & IPSR_ISR_Msk);
-#else
-    /* set by the interrupt glue to track that the cpu is inside a handler */
-    extern bool __arm_in_handler;
-
-    return __arm_in_handler;
-#endif
-}
-
 static inline ulong arch_cycle_count(void) {
 #if ARM_ISA_ARMV7M
 #if ENABLE_CYCLE_COUNTER
diff --git a/arch/arm64/arch.c b/arch/arm64/arch.c
index 59fb253e6..9aaf69d7f 100644
--- a/arch/arm64/arch.c
+++ b/arch/arm64/arch.c
@@ -26,6 +26,25 @@ static spin_lock_t arm_boot_cpu_lock = 1;
 static volatile int secondaries_to_init = 0;
 #endif
 
+static uint32_t cpu_in_irq_ctxt[SMP_MAX_CPUS];
+
+enum handler_return arch_irq(struct arm64_iframe_short *frame) {
+    enum handler_return ret;
+    uint32_t cpu = arch_curr_cpu_num();
+    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);
+
+    cpu_in_irq_ctxt[cpu] = 1;
+    ret = platform_irq(frame);
+    cpu_in_irq_ctxt[cpu] = 0;
+    return ret;
+}
+
+bool arch_in_int_handler(void) {
+    uint32_t cpu = arch_curr_cpu_num();
+    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);
+
+    return (cpu_in_irq_ctxt[cpu] == 1);
+}
+
 static void arm64_cpu_early_init(void) {
     /* set the vector base */
diff --git a/arch/arm64/exceptions.S b/arch/arm64/exceptions.S
index 1a872ddef..a3e2c1358 100644
--- a/arch/arm64/exceptions.S
+++ b/arch/arm64/exceptions.S
@@ -111,7 +111,7 @@ add sp, sp, #32
     regsave_short
     msr daifclr, #1 /* reenable fiqs once elr and spsr have been saved */
     mov x0, sp
-    bl platform_irq
+    bl arch_irq
     cbz x0, .Lirq_exception_no_preempt\@
     bl thread_preempt
 .Lirq_exception_no_preempt\@:
diff --git a/arch/arm64/include/arch/arch_ops.h b/arch/arm64/include/arch/arch_ops.h
index e195d9a57..a9e77062d 100644
--- a/arch/arm64/include/arch/arch_ops.h
+++ b/arch/arm64/include/arch/arch_ops.h
@@ -17,6 +17,7 @@
 #define ENABLE_CYCLE_COUNTER 1
 
 void arch_stacktrace(uint64_t fp, uint64_t pc);
+enum handler_return platform_irq(struct arm64_iframe_short *frame);
 
 // override of some routines
 static inline void arch_enable_ints(void) {
diff --git a/arch/include/arch/ops.h b/arch/include/arch/ops.h
index 07e2edd3c..baa59dbd7 100644
--- a/arch/include/arch/ops.h
+++ b/arch/include/arch/ops.h
@@ -16,11 +16,12 @@
 
 __BEGIN_CDECLS
 
+bool arch_in_int_handler(void);
+
 /* fast routines that most arches will implement inline */
 static void arch_enable_ints(void);
 static void arch_disable_ints(void);
 static bool arch_ints_disabled(void);
-static bool arch_in_int_handler(void);
 
 static ulong arch_cycle_count(void);
 
diff --git a/kernel/thread.c b/kernel/thread.c
index 58f57ec6f..fd92d2a3a 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -472,6 +472,10 @@ void thread_resched(void) {
     thread_t *current_thread = get_current_thread();
     uint cpu = arch_curr_cpu_num();
 
+    /* assert that this cpu is not in an active
+     * irq handling context */
+    ASSERT(!arch_in_int_handler());
+
     DEBUG_ASSERT(arch_ints_disabled());
     DEBUG_ASSERT(spin_lock_held(&thread_lock));
     DEBUG_ASSERT(current_thread->state != THREAD_RUNNING);
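
This change replaces the single global __arm_in_handler flag with a per-cpu array keyed by arch_curr_cpu_num(), which makes the state SMP-safe without locking: each cpu writes only its own slot, and only from its own interrupt path (the new arch_irq() wrapper that the exception glue now calls instead of platform_irq() directly). The weak default in arch/arch.c returns false, so architectures that don't implement the tracking still build.

Below is a host-compilable sketch of the bracket-and-assert pattern the patch introduces. It is illustration only, not code from the tree; the fake_* names and the single-cpu stand-in for arch_curr_cpu_num() are invented for the example.

    /* sketch of the per-cpu irq-context tracking pattern;
     * the fake_* names are hypothetical and do not exist in lk */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SMP_MAX_CPUS 4

    static uint32_t cpu_in_irq_ctxt[SMP_MAX_CPUS];

    /* stand-in for arch_curr_cpu_num(); one cpu on the host */
    static uint32_t curr_cpu(void) { return 0; }

    /* the check that thread_resched() relies on */
    static bool in_int_handler(void) {
        return cpu_in_irq_ctxt[curr_cpu()] == 1;
    }

    /* stand-in for platform_irq(), the high-level irq dispatcher */
    static int fake_platform_irq(void) {
        printf("dispatch: in_int_handler=%d\n", in_int_handler());
        return 1; /* nonzero: request a reschedule */
    }

    /* the arch_irq() wrapper: flag is set only around the dispatch */
    static int fake_arch_irq(void) {
        uint32_t cpu = curr_cpu();
        cpu_in_irq_ctxt[cpu] = 1;
        int ret = fake_platform_irq();
        cpu_in_irq_ctxt[cpu] = 0;
        return ret;
    }

    /* stand-in for thread_resched(): must never run in irq context */
    static void fake_resched(void) {
        assert(!in_int_handler());
        printf("rescheduled outside irq context\n");
    }

    int main(void) {
        if (fake_arch_irq()) /* flag set only while dispatching */
            fake_resched();  /* flag already cleared: assert passes */
        return 0;
    }

On ARMv7-M the per-cpu flag isn't needed, which is why the ARM_ISA_ARMV7M branch of arch_in_int_handler() keeps the MRS-based check: the hardware tracks exception state itself, and IPSR masked with IPSR_ISR_Msk is nonzero whenever an exception handler is active.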