From 410114bd508156097c0d5a93998a00fb9b19a83b Mon Sep 17 00:00:00 2001
From: vannapurve <vannapurve@google.com>
Date: Sun, 25 Oct 2020 14:28:33 +0530
Subject: [PATCH] [arm] Ensure context switch doesn't happen from irq

Context switches should not happen from within interrupt context before
the interrupt is cleared by a write to the GIC EOIR register. Without
that write, the GIC keeps the interrupt active even after the hardware
source deasserts it, so subsequent irqs from that source are never
delivered to the CPU.

This change adds an assertion that a context switch does not happen
from irq context before the interrupt is EOIed. A per-CPU flag tracks
whether the CPU is currently handling an interrupt; if so,
thread_resched() should not be called.

Signed-off-by: vannapurve <vannapurve@google.com>
---
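Reviewer note, not part of the commit message or the diff below: the sketch
that follows illustrates the window this change guards, assuming a
GICv2-style CPU interface. GICC_BASE, gic_handler[] and platform_irq_sketch()
are placeholder names for illustration, not the real platform code. An
interrupt stays active in the GIC from the GICC_IAR read until the matching
GICC_EOIR write, so a context switch taken inside the per-vector handler
would skip the EOIR write and block further irqs from that source. arch_irq()
brackets exactly this call with cpu_in_irq_ctxt, so the new ASSERT in
thread_resched() fires anywhere inside that window.

/* Illustrative sketch only: simplified GICv2 acknowledge/dispatch/EOI flow.
 * GICC_BASE and gic_handler[] are placeholders, not LK's actual GIC driver. */
#include <stdint.h>

enum handler_return { INT_NO_RESCHEDULE, INT_RESCHEDULE };

extern uintptr_t GICC_BASE;                            /* placeholder: CPU interface base */
extern enum handler_return (*gic_handler[1020])(void); /* placeholder: per-vector handlers */

#define GICC_IAR  ((volatile uint32_t *)(GICC_BASE + 0x00c)) /* interrupt acknowledge */
#define GICC_EOIR ((volatile uint32_t *)(GICC_BASE + 0x010)) /* end of interrupt */

static enum handler_return platform_irq_sketch(void) {
    uint32_t iar = *GICC_IAR;          /* read IAR: the interrupt becomes active in the GIC */
    uint32_t vector = iar & 0x3ffu;

    enum handler_return ret = INT_NO_RESCHEDULE;
    if (vector < 1020)                 /* IDs 1020-1023 are spurious/special */
        ret = gic_handler[vector]();   /* a context switch here would never reach the EOIR write */

    *GICC_EOIR = iar;                  /* write EOIR: interrupt deactivated, rescheduling is safe */
    return ret;
}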
 arch/arch.c                        | 14 +++++++++++++-
 arch/arm/arm/arch.c                | 26 ++++++++++++++++++++++++++
 arch/arm/arm/exceptions.S          | 16 +---------------
 arch/arm/include/arch/arch_ops.h   | 15 ++-------------
 arch/arm64/arch.c                  | 19 +++++++++++++++++++
 arch/arm64/exceptions.S            |  2 +-
 arch/arm64/include/arch/arch_ops.h |  1 +
 arch/include/arch/ops.h            |  3 ++-
 kernel/thread.c                    |  4 ++++
 9 files changed, 69 insertions(+), 31 deletions(-)

diff --git a/arch/arch.c b/arch/arch.c
index 23dac12e98..bb0278b947 100644
--- a/arch/arch.c
+++ b/arch/arch.c
@@ -1 +1,13 @@
-// empty file to help build empty arch module
+/*
+ * Copyright (c) 2020 Travis Geiselbrecht
+ *
+ * Use of this source code is governed by a MIT-style
+ * license that can be found in the LICENSE file or at
+ * https://opensource.org/licenses/MIT
+ */
+
+#include <arch/ops.h>
+
+__WEAK bool arch_in_int_handler(void) {
+    return false;
+}
diff --git a/arch/arm/arm/arch.c b/arch/arm/arm/arch.c
index 0936225874..c6fdb5bd06 100644
--- a/arch/arm/arm/arch.c
+++ b/arch/arm/arm/arch.c
@@ -47,6 +47,32 @@ static void arm_basic_setup(void);
 static void spinlock_test(void);
 static void spinlock_test_secondary(void);
 
+static uint32_t cpu_in_irq_ctxt[SMP_MAX_CPUS];
+
+enum handler_return arch_irq(struct arm_iframe *frame) {
+    enum handler_return ret;
+    uint32_t cpu = arch_curr_cpu_num();
+    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);
+
+    cpu_in_irq_ctxt[cpu] = 1;
+    ret = platform_irq(frame);
+    cpu_in_irq_ctxt[cpu] = 0;
+    return ret;
+}
+
+bool arch_in_int_handler(void) {
+#if ARM_ISA_ARMV7M
+    uint32_t ipsr;
+    __asm volatile ("MRS %0, ipsr" : "=r" (ipsr) );
+    return (ipsr & IPSR_ISR_Msk);
+#else
+    uint32_t cpu = arch_curr_cpu_num();
+    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);
+
+    return (cpu_in_irq_ctxt[cpu] == 1);
+#endif
+}
+
 #if WITH_SMP
 /* smp boot lock */
 spin_lock_t arm_boot_cpu_lock = 1;
diff --git a/arch/arm/arm/exceptions.S b/arch/arm/arm/exceptions.S
index 74ee3a7c35..cb32218985 100644
--- a/arch/arm/arm/exceptions.S
+++ b/arch/arm/arm/exceptions.S
@@ -204,18 +204,8 @@ FUNCTION(arm_irq)
 
     /* r0 now holds pointer to iframe */
 
-    /* track that we're inside an irq handler */
-    LOADCONST(r2, __arm_in_handler)
-    mov     r1, #1
-    str     r1, [r2]
-
     /* call into higher level code */
-    bl      platform_irq
-
-    /* clear the irq handler status */
-    LOADCONST(r1, __arm_in_handler)
-    mov     r2, #0
-    str     r2, [r1]
+    bl      arch_irq
 
     /* reschedule if the handler returns nonzero */
     cmp     r0, #0
@@ -237,7 +227,3 @@ FUNCTION(arm_fiq)
 DATA(__irq_cycle_count)
     .word 0
 #endif
-
-.data
-DATA(__arm_in_handler)
-    .word 0
diff --git a/arch/arm/include/arch/arch_ops.h b/arch/arm/include/arch/arch_ops.h
index 87e61f1d85..53cbf7f736 100644
--- a/arch/arm/include/arch/arch_ops.h
+++ b/arch/arm/include/arch/arch_ops.h
@@ -20,6 +20,8 @@
 
 __BEGIN_CDECLS
 
+enum handler_return platform_irq(struct arm_iframe *frame);
+
 #if ARM_ISA_ARMV7 || (ARM_ISA_ARMV6 && !__thumb__)
 #define ENABLE_CYCLE_COUNTER 1
 
@@ -67,19 +69,6 @@ static inline bool arch_fiqs_disabled(void) {
     return !!state;
 }
 
-static inline bool arch_in_int_handler(void) {
-#if ARM_ISA_ARMV7M
-    uint32_t ipsr;
-    __asm volatile ("MRS %0, ipsr" : "=r" (ipsr) );
-    return (ipsr & IPSR_ISR_Msk);
-#else
-    /* set by the interrupt glue to track that the cpu is inside a handler */
-    extern bool __arm_in_handler;
-
-    return __arm_in_handler;
-#endif
-}
-
 static inline ulong arch_cycle_count(void) {
 #if ARM_ISA_ARMV7M
 #if ENABLE_CYCLE_COUNTER
diff --git a/arch/arm64/arch.c b/arch/arm64/arch.c
index 59fb253e60..9aaf69d7f7 100644
--- a/arch/arm64/arch.c
+++ b/arch/arm64/arch.c
@@ -26,6 +26,25 @@ static spin_lock_t arm_boot_cpu_lock = 1;
 static volatile int secondaries_to_init = 0;
 #endif
 
+static uint32_t cpu_in_irq_ctxt[SMP_MAX_CPUS];
+
+enum handler_return arch_irq(struct arm64_iframe_short *frame) {
+    enum handler_return ret;
+    uint32_t cpu = arch_curr_cpu_num();
+    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);
+
+    cpu_in_irq_ctxt[cpu] = 1;
+    ret = platform_irq(frame);
+    cpu_in_irq_ctxt[cpu] = 0;
+    return ret;
+}
+
+bool arch_in_int_handler(void) {
+    uint32_t cpu = arch_curr_cpu_num();
+    DEBUG_ASSERT(cpu < SMP_MAX_CPUS);
+
+    return (cpu_in_irq_ctxt[cpu] == 1);
+}
 
 static void arm64_cpu_early_init(void) {
     /* set the vector base */
diff --git a/arch/arm64/exceptions.S b/arch/arm64/exceptions.S
index 1a872ddef8..a3e2c13580 100644
--- a/arch/arm64/exceptions.S
+++ b/arch/arm64/exceptions.S
@@ -111,7 +111,7 @@ add sp, sp, #32
     regsave_short
     msr daifclr, #1 /* reenable fiqs once elr and spsr have been saved */
     mov x0, sp
-    bl  platform_irq
+    bl  arch_irq
     cbz x0, .Lirq_exception_no_preempt\@
     bl  thread_preempt
 .Lirq_exception_no_preempt\@:
diff --git a/arch/arm64/include/arch/arch_ops.h b/arch/arm64/include/arch/arch_ops.h
index e195d9a577..a9e77062de 100644
--- a/arch/arm64/include/arch/arch_ops.h
+++ b/arch/arm64/include/arch/arch_ops.h
@@ -17,6 +17,7 @@
 #define ENABLE_CYCLE_COUNTER 1
 
 void arch_stacktrace(uint64_t fp, uint64_t pc);
+enum handler_return platform_irq(struct arm64_iframe_short *frame);
 
 // override of some routines
 static inline void arch_enable_ints(void) {
diff --git a/arch/include/arch/ops.h b/arch/include/arch/ops.h
index 07e2edd3c5..baa59dbd73 100644
--- a/arch/include/arch/ops.h
+++ b/arch/include/arch/ops.h
@@ -16,11 +16,12 @@
 
 __BEGIN_CDECLS
 
+bool arch_in_int_handler(void);
+
 /* fast routines that most arches will implement inline */
 static void arch_enable_ints(void);
 static void arch_disable_ints(void);
 static bool arch_ints_disabled(void);
-static bool arch_in_int_handler(void);
 
 static ulong arch_cycle_count(void);
 
diff --git a/kernel/thread.c b/kernel/thread.c
index 58f57ec6f7..fd92d2a3a1 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -472,6 +472,10 @@ void thread_resched(void) {
     thread_t *current_thread = get_current_thread();
     uint cpu = arch_curr_cpu_num();
 
+    /* assert that the cpu is not in an active
+     * irq handling context */
+    ASSERT(!arch_in_int_handler());
+
     DEBUG_ASSERT(arch_ints_disabled());
     DEBUG_ASSERT(spin_lock_held(&thread_lock));
     DEBUG_ASSERT(current_thread->state != THREAD_RUNNING);