From 382dcdd66ce86491ddd390b39224468c82a47892 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:05 +0100 Subject: [PATCH 01/20] arm64: remove redundant local_daif_mask() in bad_mode() Upon taking an exception, the CPU sets all the DAIF bits. We never clear any of these bits prior to calling bad_mode(), and bad_mode() itself never clears any of these bits, so there's no need to call local_daif_mask(). This patch removes the redundant call. Signed-off-by: Mark Rutland Acked-by: Marc Zyngier Acked-by: Catalin Marinas Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-2-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/traps.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index a05d34f0e82a7a..41f0aa92022a19 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -765,7 +765,6 @@ asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr_get_class_string(esr)); __show_regs(regs); - local_daif_mask(); panic("bad mode"); } From f7c706f0391d7894d1ae2d28cb2d5446f5ec59ad Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:06 +0100 Subject: [PATCH 02/20] arm64: entry: unmask IRQ+FIQ after EL0 handling For non-fatal exceptions taken from EL0, we expect that at some point during exception handling it is possible to return to a regular process context with all exceptions unmasked (e.g. as we do in do_notify_resume()), and we generally aim to unmask exceptions wherever possible. While handling SError and debug exceptions from EL0, we need to leave some exceptions masked during handling. Handling SError requires us to mask SError (which also requires masking IRQ+FIQ), and handling debug exceptions requires us to mask debug (which also requires masking SError+IRQ+FIQ). Once do_serror() or do_debug_exception() has returned, we no longer need to mask exceptions, and can unmask them all, which is what we did prior to commit: 9034f6251572a474 ("arm64: Do not enable IRQs for ct_user_exit") ... where we had to mask IRQs as context_tracking_user_exit() expected IRQs to be masked. Since then, we realised that our context tracking wasn't entirely correct, and reworked the entry code to fix this. As of commit: 23529049c6842382 ("arm64: entry: fix non-NMI user<->kernel transitions") ... we replaced the call to context_tracking_user_exit() with a call to user_exit_irqoff() as part of enter_from_user_mode(), which occurs earlier, before we run the body of the handler and unmask exceptions in DAIF. When we return to userspace, we go via ret_to_user(), which masks exceptions in DAIF prior to calling user_enter_irqoff() as part of exit_to_user_mode(). Thus, there's no longer a reason to leave IRQs or FIQs masked at the end of the EL0 debug or error handlers, as neither the user exit context tracking nor the user entry context tracking requires this. Let's bring these into line with other EL0 exception handlers and ensure that IRQ and FIQ are unmasked in DAIF at some point during the handler.
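As an aside, the DAIF masking contexts referred to above are plain bitmask constants. The following is a minimal userspace C sketch; the bit values mirror arch/arm64/include/uapi/asm/ptrace.h and the context definitions in arch/arm64/include/asm/daifflags.h as of this series, while daif_describe() is a made-up helper for illustration only:

#include <stdio.h>

/* PSTATE.DAIF bits, mirroring arch/arm64/include/uapi/asm/ptrace.h */
#define PSR_F_BIT 0x00000040 /* FIQ masked */
#define PSR_I_BIT 0x00000080 /* IRQ masked */
#define PSR_A_BIT 0x00000100 /* SError masked */
#define PSR_D_BIT 0x00000200 /* Debug masked */

/* Masking contexts, mirroring arch/arm64/include/asm/daifflags.h */
#define DAIF_PROCCTX       0
#define DAIF_PROCCTX_NOIRQ (PSR_I_BIT | PSR_F_BIT)
#define DAIF_ERRCTX        (PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
#define DAIF_MASK          (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

/* Made-up helper: show which exceptions a context leaves masked. */
static void daif_describe(const char *name, unsigned int daif)
{
	printf("%-18s D=%d A=%d I=%d F=%d\n", name,
	       !!(daif & PSR_D_BIT), !!(daif & PSR_A_BIT),
	       !!(daif & PSR_I_BIT), !!(daif & PSR_F_BIT));
}

int main(void)
{
	daif_describe("DAIF_PROCCTX", DAIF_PROCCTX);
	daif_describe("DAIF_PROCCTX_NOIRQ", DAIF_PROCCTX_NOIRQ);
	daif_describe("DAIF_ERRCTX", DAIF_ERRCTX);
	daif_describe("DAIF_MASK", DAIF_MASK);
	return 0;
}

The el0_dbg change in the diff that follows, from DAIF_PROCCTX_NOIRQ to DAIF_PROCCTX, is exactly the move from a context that leaves IRQ+FIQ masked to one that unmasks everything.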
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-3-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry-common.c | 2 +- arch/arm64/kernel/entry.S | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 340d04e1361794..02be1517e08f5c 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -398,7 +398,7 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr) enter_from_user_mode(); do_debug_exception(far, esr, regs); - local_daif_restore(DAIF_PROCCTX_NOIRQ); + local_daif_restore(DAIF_PROCCTX); } static void noinstr el0_svc(struct pt_regs *regs) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 3513984a88bd1b..6b2f6f5c5bb8cd 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -794,7 +794,7 @@ el0_error_naked: mov x0, sp mov x1, x25 bl do_serror - enable_da + enable_daif b ret_to_user SYM_CODE_END(el0_error) From bb8e93a287a5f5f10fe7a9d8f612f6105c9622ef Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:07 +0100 Subject: [PATCH 03/20] arm64: entry: convert SError handlers to C For various reasons we'd like to convert the bulk of arm64's exception triage logic to C. As a step towards that, this patch converts the EL1 and EL0 SError triage logic to C. Separate C functions are added for the native and compat cases so that in subsequent patches we can handle native/compat differences in C. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-4-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 4 ++++ arch/arm64/kernel/entry-common.c | 32 ++++++++++++++++++++++++++++++ arch/arm64/kernel/entry.S | 16 +++++---------- arch/arm64/kernel/traps.c | 6 +----- 4 files changed, 42 insertions(+), 16 deletions(-) diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 6546158d2f2d46..3a859d4e8b59e7 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -32,8 +32,11 @@ static inline u32 disr_to_esr(u64 disr) } asmlinkage void el1_sync_handler(struct pt_regs *regs); +asmlinkage void el1_error_handler(struct pt_regs *regs); asmlinkage void el0_sync_handler(struct pt_regs *regs); +asmlinkage void el0_error_handler(struct pt_regs *regs); asmlinkage void el0_sync_compat_handler(struct pt_regs *regs); +asmlinkage void el0_error_compat_handler(struct pt_regs *regs); asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs); asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs); @@ -57,4 +60,5 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs); void do_el0_svc(struct pt_regs *regs); void do_el0_svc_compat(struct pt_regs *regs); void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr); +void do_serror(struct pt_regs *regs, unsigned int esr); #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 02be1517e08f5c..3b794372107761 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -279,6 +279,16 @@ asmlinkage void noinstr 
el1_sync_handler(struct pt_regs *regs) } } +asmlinkage void noinstr el1_error_handler(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + local_daif_restore(DAIF_ERRCTX); + arm64_enter_nmi(regs); + do_serror(regs, esr); + arm64_exit_nmi(regs); +} + asmlinkage void noinstr enter_from_user_mode(void) { lockdep_hardirqs_off(CALLER_ADDR0); @@ -468,6 +478,23 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs) } } +static void __el0_error_handler_common(struct pt_regs *regs) +{ + unsigned long esr = read_sysreg(esr_el1); + + enter_from_user_mode(); + local_daif_restore(DAIF_ERRCTX); + arm64_enter_nmi(regs); + do_serror(regs, esr); + arm64_exit_nmi(regs); + local_daif_restore(DAIF_PROCCTX); +} + +asmlinkage void noinstr el0_error_handler(struct pt_regs *regs) +{ + __el0_error_handler_common(regs); +} + #ifdef CONFIG_COMPAT static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr) { @@ -526,4 +553,9 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs) el0_inv(regs, esr); } } + +asmlinkage void noinstr el0_error_compat_handler(struct pt_regs *regs) +{ + __el0_error_handler_common(regs); +} #endif /* CONFIG_COMPAT */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 6b2f6f5c5bb8cd..656f3129bfefe8 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -757,7 +757,9 @@ SYM_CODE_END(el0_fiq_compat) SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat) kernel_entry 0, 32 - b el0_error_naked + mov x0, sp + bl el0_error_compat_handler + b ret_to_user SYM_CODE_END(el0_error_compat) #endif @@ -778,23 +780,15 @@ SYM_CODE_END(el0_fiq) SYM_CODE_START_LOCAL(el1_error) kernel_entry 1 - mrs x1, esr_el1 - enable_dbg mov x0, sp - bl do_serror + bl el1_error_handler kernel_exit 1 SYM_CODE_END(el1_error) SYM_CODE_START_LOCAL(el0_error) kernel_entry 0 -el0_error_naked: - mrs x25, esr_el1 - user_exit_irqoff - enable_dbg mov x0, sp - mov x1, x25 - bl do_serror - enable_daif + bl el0_error_handler b ret_to_user SYM_CODE_END(el0_error) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 41f0aa92022a19..5fd12d19ef4be1 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -869,15 +869,11 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) } } -asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr) +void do_serror(struct pt_regs *regs, unsigned int esr) { - arm64_enter_nmi(regs); - /* non-RAS errors are not containable */ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr)) arm64_serror_panic(regs, esr); - - arm64_exit_nmi(regs); } /* GENERIC_BUG traps */ From 33a3581a76f3a36c7dcc9864120ce681bcfbcff1 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:08 +0100 Subject: [PATCH 04/20] arm64: entry: move arm64_preempt_schedule_irq to entry-common.c Subsequent patches will pull more of the IRQ entry handling into C. To keep this in one place, let's move arm64_preempt_schedule_irq() into entry-common.c along with the other entry management functions. We no longer need to include <linux/lockdep.h> in process.c, so the include directive is removed. There should be no functional change as a result of this patch.
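To illustrate the guard that travels with this function, here is a small self-contained C model; caps_finalized and do_preempt() are stand-ins for the kernel's system_capabilities_finalized() and preempt_schedule_irq():

#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for the kernel's system_capabilities_finalized(). */
static bool caps_finalized;

/* Stub standing in for preempt_schedule_irq(). */
static void do_preempt(void)
{
	printf("preempting current task\n");
}

/*
 * Model of the guard in arm64_preempt_schedule_irq(): preempted tasks
 * leave PSTATE copies on their stacks, and cpufeature enable callbacks
 * may modify PSTATE, so preemption is refused until the capabilities
 * are finalized.
 */
static void maybe_preempt(void)
{
	if (caps_finalized)
		do_preempt();
	else
		printf("capabilities not finalized yet, not preempting\n");
}

int main(void)
{
	maybe_preempt();       /* early boot: refused */
	caps_finalized = true; /* secondary CPUs up, caps finalized */
	maybe_preempt();       /* now allowed */
	return 0;
}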
Reviewed-by: Joey Gouly Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-5-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry-common.c | 20 ++++++++++++++++++++ arch/arm64/kernel/process.c | 17 ----------------- 2 files changed, 20 insertions(+), 17 deletions(-) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 3b794372107761..1fe60578e55664 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -6,7 +6,11 @@ */ #include +#include +#include #include +#include +#include #include #include @@ -113,6 +117,22 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) exit_to_kernel_mode(regs); } +asmlinkage void __sched arm64_preempt_schedule_irq(void) +{ + lockdep_assert_irqs_disabled(); + + /* + * Preempting a task from an IRQ means we leave copies of PSTATE + * on the stack. cpufeature's enable calls may modify PSTATE, but + * resuming one of these preempted tasks would undo those changes. + * + * Only allow a task to be preempted once cpufeatures have been + * enabled. + */ + if (system_capabilities_finalized()) + preempt_schedule_irq(); +} + #ifdef CONFIG_ARM64_ERRATUM_1463225 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index b4bb67f17a2cab..2e733770915520 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -724,22 +723,6 @@ static int __init tagged_addr_init(void) core_initcall(tagged_addr_init); #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ -asmlinkage void __sched arm64_preempt_schedule_irq(void) -{ - lockdep_assert_irqs_disabled(); - - /* - * Preempting a task from an IRQ means we leave copies of PSTATE - * on the stack. cpufeature's enable calls may modify PSTATE, but - * resuming one of these preempted tasks would undo those changes. - * - * Only allow a task to be preempted once cpufeatures have been - * enabled. - */ - if (system_capabilities_finalized()) - preempt_schedule_irq(); -} - #ifdef CONFIG_BINFMT_ELF int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, bool has_interp, bool is_interp) From 101a5b665dcdff169ae7ad90556604c483d9027e Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:09 +0100 Subject: [PATCH 05/20] arm64: entry: move NMI preempt logic to C Currently portions of our preempt logic are written in C while other parts are written in assembly. Let's clean this up a little bit by moving the NMI preempt checks to C. For now, the preempt count (and need_resched) checking is left in assembly, and will be converted with the body of the IRQ handler in subsequent patches. Other than the increased lockdep coverage there should be no functional change as a result of this patch.
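A hedged sketch of the check being moved: with pseudo-NMI, the GIC irqchip driver clears DAIF.IF for normal IRQs, so a non-zero DAIF at preemption time can only mean an NMI. Here fake_daif and irq_prio_masking are stand-ins for read_sysreg(daif) and system_uses_irq_prio_masking():

#include <stdbool.h>
#include <stdio.h>

#define PSR_I_BIT 0x80 /* IRQ masked */

/* Stubs for read_sysreg(daif) and system_uses_irq_prio_masking(). */
static unsigned int fake_daif;
static bool irq_prio_masking = true;

static void preempt_check(void)
{
	/*
	 * With pseudo-NMI, the GIC driver clears DAIF.IF for normal IRQs,
	 * so any DAIF bit still set at this point means the interrupt was
	 * an NMI and preemption must be skipped.
	 */
	if (irq_prio_masking && fake_daif) {
		printf("NMI: skipping preemption\n");
		return;
	}
	printf("normal IRQ: preemption allowed\n");
}

int main(void)
{
	fake_daif = 0;         /* normal IRQ */
	preempt_check();
	fake_daif = PSR_I_BIT; /* pseudo-NMI left DAIF.I set */
	preempt_check();
	return 0;
}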
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-6-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry-common.c | 9 +++++++++ arch/arm64/kernel/entry.S | 12 +----------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 1fe60578e55664..08d17eb0ce13af 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -121,6 +121,15 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void) { lockdep_assert_irqs_disabled(); + /* + * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC + * priority masking is used the GIC irqchip driver will clear DAIF.IF + * using gic_arch_enable_irqs() for normal IRQs. If anything is set in + * DAIF we must have handled an NMI, so skip preemption. + */ + if (system_uses_irq_prio_masking() && read_sysreg(daif)) + return; + /* * Preempting a task from an IRQ means we leave copies of PSTATE * on the stack. cpufeature's enable calls may modify PSTATE, but diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 656f3129bfefe8..449628290ce8f5 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -562,17 +562,7 @@ tsk .req x28 // current thread_info #ifdef CONFIG_PREEMPTION ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count -alternative_if ARM64_HAS_IRQ_PRIO_MASKING - /* - * DA were cleared at start of handling, and IF are cleared by - * the GIC irqchip driver using gic_arch_enable_irqs() for - * normal IRQs. If anything is set, it means we come back from - * an NMI instead of a normal IRQ, so skip preemption - */ - mrs x0, daif - orr x24, x24, x0 -alternative_else_nop_endif - cbnz x24, 1f // preempt count != 0 || NMI return path + cbnz x24, 1f // preempt count != 0 bl arm64_preempt_schedule_irq // irq en/disable is done inside 1: #endif From f8049488e7d37b0a0e438ee449e83b3e46958743 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:10 +0100 Subject: [PATCH 06/20] arm64: entry: add a call_on_irq_stack helper When handling IRQ/FIQ exceptions the entry assembly may transition from a task's stack to a CPU's IRQ stack (and IRQ shadow call stack). In subsequent patches we want to migrate the IRQ/FIQ triage logic to C, and as we want to perform some actions on the task stack (e.g. EL1 preemption), we need to switch stacks within the C handler. So that we can do so, this patch adds a helper to call a function on a CPU's IRQ stack (and shadow stack as appropriate). Subsequent patches will make use of the new helper function. 
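As a userspace analogue of call_on_irq_stack(), the POSIX ucontext API can run a function on a separately allocated stack and return to the caller, much as the helper below saves a frame record, switches SP, and restores it. This is only an illustration of the idea, not the kernel mechanism itself:

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define IRQ_STACK_SIZE (64 * 1024)

static ucontext_t main_ctx, irq_ctx;

static void irq_work(void)
{
	int marker;
	printf("handler runs near %p (alternate stack)\n", (void *)&marker);
}

int main(void)
{
	int marker;
	printf("caller runs near %p (original stack)\n", (void *)&marker);

	/* Prepare a context whose stack is a separate allocation. */
	getcontext(&irq_ctx);
	irq_ctx.uc_stack.ss_sp = malloc(IRQ_STACK_SIZE);
	irq_ctx.uc_stack.ss_size = IRQ_STACK_SIZE;
	irq_ctx.uc_link = &main_ctx;      /* resume here when irq_work returns */
	makecontext(&irq_ctx, irq_work, 0);

	/* Switch stacks, run the function, switch back. */
	swapcontext(&main_ctx, &irq_ctx);
	printf("back on the original stack\n");
	free(irq_ctx.uc_stack.ss_sp);
	return 0;
}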
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-7-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 2 ++ arch/arm64/kernel/entry.S | 36 ++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 3a859d4e8b59e7..c24b69c0c58998 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -40,6 +40,8 @@ asmlinkage void el0_error_compat_handler(struct pt_regs *regs); asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs); asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs); +asmlinkage void call_on_irq_stack(struct pt_regs *regs, + void (*func)(struct pt_regs *)); asmlinkage void enter_from_user_mode(void); asmlinkage void exit_to_user_mode(void); void arm64_enter_nmi(struct pt_regs *regs); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 449628290ce8f5..8ca74ce115eed1 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -982,6 +982,42 @@ SYM_CODE_START(ret_from_fork) SYM_CODE_END(ret_from_fork) NOKPROBE(ret_from_fork) +/* + * void call_on_irq_stack(struct pt_regs *regs, + * void (*func)(struct pt_regs *)); + * + * Calls func(regs) using this CPU's irq stack and shadow irq stack. + */ +SYM_FUNC_START(call_on_irq_stack) +#ifdef CONFIG_SHADOW_CALL_STACK + stp scs_sp, xzr, [sp, #-16]! + ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17 +#endif + /* Create a frame record to save our LR and SP (implicit in FP) */ + stp x29, x30, [sp, #-16]! + mov x29, sp + + ldr_this_cpu x16, irq_stack_ptr, x17 + mov x15, #IRQ_STACK_SIZE + add x16, x16, x15 + + /* Move to the new stack and call the function there */ + mov sp, x16 + blr x1 + + /* + * Restore the SP from the FP, and restore the FP and LR from the frame + * record. + */ + mov sp, x29 + ldp x29, x30, [sp], #16 +#ifdef CONFIG_SHADOW_CALL_STACK + ldp scs_sp, xzr, [sp], #16 +#endif + ret +SYM_FUNC_END(call_on_irq_stack) +NOKPROBE(call_on_irq_stack) + #ifdef CONFIG_ARM_SDE_INTERFACE #include From 064dbfb4169141943ec7d9dbfd02974dd008f2ce Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:11 +0100 Subject: [PATCH 07/20] arm64: entry: convert IRQ+FIQ handlers to C For various reasons we'd like to convert the bulk of arm64's exception triage logic to C. As a step towards that, this patch converts the EL1 and EL0 IRQ+FIQ triage logic to C. Separate C functions are added for the native and compat cases so that in subsequent patches we can handle native/compat differences in C. Since the triage functions can now call arm64_apply_bp_hardening() directly, the do_el0_irq_bp_hardening() wrapper function is removed. Since the user_exit_irqoff macro is now unused, it is removed. The user_enter_irqoff macro is still used by the ret_to_user code, and cannot be removed at this time. 
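One detail worth making concrete is how the entry code decides whether it is already on the task stack. Because task stacks are THREAD_SIZE-aligned, a single XOR-and-mask suffices; the sketch below models the check performed by the irq_stack_entry macro that this patch's diff removes (the kernel's C-side on_thread_stack() achieves the same end by a range check):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 16384 /* power of two, as on arm64 */

/*
 * Since task stacks are THREAD_SIZE-aligned, sp lies on a given stack
 * iff sp and the stack's base agree in every bit above log2(THREAD_SIZE).
 */
static int on_task_stack(uintptr_t sp, uintptr_t base)
{
	return ((sp ^ base) & ~(uintptr_t)(THREAD_SIZE - 1)) == 0;
}

int main(void)
{
	uintptr_t base = 0x12340000UL; /* hypothetical, aligned stack base */

	printf("%d\n", on_task_stack(base + 0x100, base));           /* 1 */
	printf("%d\n", on_task_stack(base + THREAD_SIZE + 8, base)); /* 0 */
	return 0;
}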
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-8-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 8 ++- arch/arm64/include/asm/processor.h | 2 - arch/arm64/kernel/entry-common.c | 93 ++++++++++++++++++++++++- arch/arm64/kernel/entry.S | 108 ++++------------------------- arch/arm64/mm/fault.c | 7 -- 5 files changed, 110 insertions(+), 108 deletions(-) diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index c24b69c0c58998..4284ee57a9a597 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -32,14 +32,18 @@ static inline u32 disr_to_esr(u64 disr) } asmlinkage void el1_sync_handler(struct pt_regs *regs); +asmlinkage void el1_irq_handler(struct pt_regs *regs); +asmlinkage void el1_fiq_handler(struct pt_regs *regs); asmlinkage void el1_error_handler(struct pt_regs *regs); asmlinkage void el0_sync_handler(struct pt_regs *regs); +asmlinkage void el0_irq_handler(struct pt_regs *regs); +asmlinkage void el0_fiq_handler(struct pt_regs *regs); asmlinkage void el0_error_handler(struct pt_regs *regs); asmlinkage void el0_sync_compat_handler(struct pt_regs *regs); +asmlinkage void el0_irq_compat_handler(struct pt_regs *regs); +asmlinkage void el0_fiq_compat_handler(struct pt_regs *regs); asmlinkage void el0_error_compat_handler(struct pt_regs *regs); -asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs); -asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs); asmlinkage void call_on_irq_stack(struct pt_regs *regs, void (*func)(struct pt_regs *)); asmlinkage void enter_from_user_mode(void); diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 9df3feeee8909b..2f21c76324bb71 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -257,8 +257,6 @@ void set_task_sctlr_el1(u64 sctlr); extern struct task_struct *cpu_switch_to(struct task_struct *prev, struct task_struct *next); -asmlinkage void arm64_preempt_schedule_irq(void); - #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 08d17eb0ce13af..ae1b6d7c00e1b0 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include /* @@ -101,7 +103,7 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs) __nmi_exit(); } -asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs) +static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs) { if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) arm64_enter_nmi(regs); @@ -109,7 +111,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs) enter_from_kernel_mode(regs); } -asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) +static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) { if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) arm64_exit_nmi(regs); @@ -117,7 +119,7 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) exit_to_kernel_mode(regs); } -asmlinkage void __sched arm64_preempt_schedule_irq(void) +static void __sched arm64_preempt_schedule_irq(void) { lockdep_assert_irqs_disabled(); @@ -142,6 +144,18 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void) 
preempt_schedule_irq(); } +static void do_interrupt_handler(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + if (on_thread_stack()) + call_on_irq_stack(regs, handler); + else + handler(regs); +} + +extern void (*handle_arch_irq)(struct pt_regs *); +extern void (*handle_arch_fiq)(struct pt_regs *); + #ifdef CONFIG_ARM64_ERRATUM_1463225 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); @@ -308,6 +322,36 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs) } } +static void noinstr el1_interrupt(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + write_sysreg(DAIF_PROCCTX_NOIRQ, daif); + + enter_el1_irq_or_nmi(regs); + do_interrupt_handler(regs, handler); + + /* + * Note: thread_info::preempt_count includes both thread_info::count + * and thread_info::need_resched, and is not equivalent to + * preempt_count(). + */ + if (IS_ENABLED(CONFIG_PREEMPTION) && + READ_ONCE(current_thread_info()->preempt_count) == 0) + arm64_preempt_schedule_irq(); + + exit_el1_irq_or_nmi(regs); +} + +asmlinkage void noinstr el1_irq_handler(struct pt_regs *regs) +{ + el1_interrupt(regs, handle_arch_irq); +} + +asmlinkage void noinstr el1_fiq_handler(struct pt_regs *regs) +{ + el1_interrupt(regs, handle_arch_fiq); +} + asmlinkage void noinstr el1_error_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -507,6 +551,39 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs) } } +static void noinstr el0_interrupt(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + enter_from_user_mode(); + + write_sysreg(DAIF_PROCCTX_NOIRQ, daif); + + if (regs->pc & BIT(55)) + arm64_apply_bp_hardening(); + + do_interrupt_handler(regs, handler); +} + +static void noinstr __el0_irq_handler_common(struct pt_regs *regs) +{ + el0_interrupt(regs, handle_arch_irq); +} + +asmlinkage void noinstr el0_irq_handler(struct pt_regs *regs) +{ + __el0_irq_handler_common(regs); +} + +static void noinstr __el0_fiq_handler_common(struct pt_regs *regs) +{ + el0_interrupt(regs, handle_arch_fiq); +} + +asmlinkage void noinstr el0_fiq_handler(struct pt_regs *regs) +{ + __el0_fiq_handler_common(regs); +} + static void __el0_error_handler_common(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -583,6 +660,16 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs) } } +asmlinkage void noinstr el0_irq_compat_handler(struct pt_regs *regs) +{ + __el0_irq_handler_common(regs); +} + +asmlinkage void noinstr el0_fiq_compat_handler(struct pt_regs *regs) +{ + __el0_fiq_handler_common(regs); +} + asmlinkage void noinstr el0_error_compat_handler(struct pt_regs *regs) { __el0_error_handler_common(regs); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 8ca74ce115eed1..8eb3a0a5141308 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -33,12 +33,6 @@ * Context tracking and irqflag tracing need to instrument transitions between * user and kernel mode. 
*/ - .macro user_exit_irqoff -#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS) - bl enter_from_user_mode -#endif - .endm - .macro user_enter_irqoff #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS) bl exit_to_user_mode @@ -486,63 +480,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0) SYM_CODE_END(__swpan_exit_el0) #endif - .macro irq_stack_entry - mov x19, sp // preserve the original sp -#ifdef CONFIG_SHADOW_CALL_STACK - mov x24, scs_sp // preserve the original shadow stack -#endif - - /* - * Compare sp with the base of the task stack. - * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack, - * and should switch to the irq stack. - */ - ldr x25, [tsk, TSK_STACK] - eor x25, x25, x19 - and x25, x25, #~(THREAD_SIZE - 1) - cbnz x25, 9998f - - ldr_this_cpu x25, irq_stack_ptr, x26 - mov x26, #IRQ_STACK_SIZE - add x26, x25, x26 - - /* switch to the irq stack */ - mov sp, x26 - -#ifdef CONFIG_SHADOW_CALL_STACK - /* also switch to the irq shadow stack */ - ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26 -#endif - -9998: - .endm - - /* - * The callee-saved regs (x19-x29) should be preserved between - * irq_stack_entry and irq_stack_exit, but note that kernel_entry - * uses x20-x23 to store data for later use. - */ - .macro irq_stack_exit - mov sp, x19 -#ifdef CONFIG_SHADOW_CALL_STACK - mov scs_sp, x24 -#endif - .endm - /* GPRs used by entry code */ tsk .req x28 // current thread_info /* * Interrupt handling. */ - .macro irq_handler, handler:req - ldr_l x1, \handler - mov x0, sp - irq_stack_entry - blr x1 - irq_stack_exit - .endm - .macro gic_prio_kentry_setup, tmp:req #ifdef CONFIG_ARM64_PSEUDO_NMI alternative_if ARM64_HAS_IRQ_PRIO_MASKING @@ -552,35 +495,6 @@ tsk .req x28 // current thread_info #endif .endm - .macro el1_interrupt_handler, handler:req - enable_da - - mov x0, sp - bl enter_el1_irq_or_nmi - - irq_handler \handler - -#ifdef CONFIG_PREEMPTION - ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count - cbnz x24, 1f // preempt count != 0 - bl arm64_preempt_schedule_irq // irq en/disable is done inside -1: -#endif - - mov x0, sp - bl exit_el1_irq_or_nmi - .endm - - .macro el0_interrupt_handler, handler:req - user_exit_irqoff - enable_da - - tbz x22, #55, 1f - bl do_el0_irq_bp_hardening -1: - irq_handler \handler - .endm - .text /* @@ -704,13 +618,15 @@ SYM_CODE_END(el1_sync) .align 6 SYM_CODE_START_LOCAL_NOALIGN(el1_irq) kernel_entry 1 - el1_interrupt_handler handle_arch_irq + mov x0, sp + bl el1_irq_handler kernel_exit 1 SYM_CODE_END(el1_irq) SYM_CODE_START_LOCAL_NOALIGN(el1_fiq) kernel_entry 1 - el1_interrupt_handler handle_arch_fiq + mov x0, sp + bl el1_fiq_handler kernel_exit 1 SYM_CODE_END(el1_fiq) @@ -737,12 +653,16 @@ SYM_CODE_END(el0_sync_compat) .align 6 SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat) kernel_entry 0, 32 - b el0_irq_naked + mov x0, sp + bl el0_irq_compat_handler + b ret_to_user SYM_CODE_END(el0_irq_compat) SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat) kernel_entry 0, 32 - b el0_fiq_naked + mov x0, sp + bl el0_fiq_compat_handler + b ret_to_user SYM_CODE_END(el0_fiq_compat) SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat) @@ -756,15 +676,15 @@ SYM_CODE_END(el0_error_compat) .align 6 SYM_CODE_START_LOCAL_NOALIGN(el0_irq) kernel_entry 0 -el0_irq_naked: - el0_interrupt_handler handle_arch_irq + mov x0, sp + bl el0_irq_handler b ret_to_user SYM_CODE_END(el0_irq) SYM_CODE_START_LOCAL_NOALIGN(el0_fiq) kernel_entry 0 -el0_fiq_naked: - el0_interrupt_handler handle_arch_fiq + mov x0, sp + bl el0_fiq_handler b ret_to_user 
SYM_CODE_END(el0_fiq) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 871c82ab0a3095..3b4a4adfddfd30 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -836,13 +836,6 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) } NOKPROBE_SYMBOL(do_mem_abort); -void do_el0_irq_bp_hardening(void) -{ - /* PC has already been checked in entry.S */ - arm64_apply_bp_hardening(); -} -NOKPROBE_SYMBOL(do_el0_irq_bp_hardening); - void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, From 2f2bbaa4eda027d0bf0f3f23d0c206b2b76e2180 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:12 +0100 Subject: [PATCH 08/20] arm64: entry: organise entry handlers consistently In entry.S we have two comments which distinguish EL0 and EL1 exception handlers, but the code isn't actually laid out to match, and there are a few other inconsistencies that would be good to clear up. This patch organises the entry handlers consistently: * The handlers are laid out in order of the vectors, to make them easier to navigate. * The inconsistently-applied alignment is removed. * The handlers are consistently marked with SYM_CODE_START_LOCAL() rather than SYM_CODE_START_LOCAL_NOALIGN(), giving them the same default alignment as other assembly code snippets. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-9-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 78 ++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 42 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 8eb3a0a5141308..ed7c55d57afeb5 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -607,65 +607,88 @@ SYM_CODE_END(el1_error_invalid) /* * EL1 mode handlers. */ - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el1_sync) +SYM_CODE_START_LOCAL(el1_sync) kernel_entry 1 mov x0, sp bl el1_sync_handler kernel_exit 1 SYM_CODE_END(el1_sync) - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el1_irq) +SYM_CODE_START_LOCAL(el1_irq) kernel_entry 1 mov x0, sp bl el1_irq_handler kernel_exit 1 SYM_CODE_END(el1_irq) -SYM_CODE_START_LOCAL_NOALIGN(el1_fiq) +SYM_CODE_START_LOCAL(el1_fiq) kernel_entry 1 mov x0, sp bl el1_fiq_handler kernel_exit 1 SYM_CODE_END(el1_fiq) +SYM_CODE_START_LOCAL(el1_error) + kernel_entry 1 + mov x0, sp + bl el1_error_handler + kernel_exit 1 +SYM_CODE_END(el1_error) + /* * EL0 mode handlers.
*/ SYM_CODE_START_LOCAL(el0_sync) kernel_entry 0 mov x0, sp bl el0_sync_handler b ret_to_user SYM_CODE_END(el0_sync) +SYM_CODE_START_LOCAL(el0_irq) + kernel_entry 0 + mov x0, sp + bl el0_irq_handler + b ret_to_user +SYM_CODE_END(el0_irq) + +SYM_CODE_START_LOCAL(el0_fiq) + kernel_entry 0 + mov x0, sp + bl el0_fiq_handler + b ret_to_user +SYM_CODE_END(el0_fiq) + +SYM_CODE_START_LOCAL(el0_error) + kernel_entry 0 + mov x0, sp + bl el0_error_handler + b ret_to_user +SYM_CODE_END(el0_error) + #ifdef CONFIG_COMPAT - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat) +SYM_CODE_START_LOCAL(el0_sync_compat) kernel_entry 0, 32 mov x0, sp bl el0_sync_compat_handler b ret_to_user SYM_CODE_END(el0_sync_compat) - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat) +SYM_CODE_START_LOCAL(el0_irq_compat) kernel_entry 0, 32 mov x0, sp bl el0_irq_compat_handler b ret_to_user SYM_CODE_END(el0_irq_compat) -SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat) +SYM_CODE_START_LOCAL(el0_fiq_compat) kernel_entry 0, 32 mov x0, sp bl el0_fiq_compat_handler b ret_to_user SYM_CODE_END(el0_fiq_compat) -SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat) +SYM_CODE_START_LOCAL(el0_error_compat) kernel_entry 0, 32 mov x0, sp bl el0_error_compat_handler @@ -673,35 +696,6 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat) SYM_CODE_END(el0_error_compat) #endif - .align 6 -SYM_CODE_START_LOCAL_NOALIGN(el0_irq) - kernel_entry 0 - mov x0, sp - bl el0_irq_handler - b ret_to_user -SYM_CODE_END(el0_irq) - -SYM_CODE_START_LOCAL_NOALIGN(el0_fiq) - kernel_entry 0 - mov x0, sp - bl el0_fiq_handler - b ret_to_user -SYM_CODE_END(el0_fiq) - -SYM_CODE_START_LOCAL(el1_error) - kernel_entry 1 - mov x0, sp - bl el1_error_handler - kernel_exit 1 -SYM_CODE_END(el1_error) - -SYM_CODE_START_LOCAL(el0_error) - kernel_entry 0 - mov x0, sp - bl el0_error_handler - b ret_to_user -SYM_CODE_END(el0_error) - /* * "slow" syscall return path. */ From e931fa03c6bf525babc9a41b951eb2311b055abb Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:13 +0100 Subject: [PATCH 09/20] arm64: entry: organise entry vectors consistently In subsequent patches we'll rename the entry handlers based on their original EL, register width, and exception class. To do so, we need to make all three arguments to the `kernel_ventry` macro mandatory, and distinguish EL1h from EL1t. In preparation for this, let's make the current set of arguments mandatory, and move the `regsize` column before the branch label suffix, making the vectors easier to read column-wise. There should be no functional change as a result of this patch.
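For readers unfamiliar with the layout these macros populate: the architectural vector table has four groups (EL1t, EL1h, 64-bit EL0, 32-bit EL0) of four entries (sync, IRQ, FIQ, error), each 128 bytes as enforced by the .align 7 in kernel_ventry. A small C sketch that prints the resulting offsets from VBAR_EL1:

#include <stdio.h>

/*
 * The arm64 vector table has four groups of four entries; each entry
 * is 128 bytes of code (hence the .align 7 in kernel_ventry).
 */
static const char *types[]  = { "sync", "irq", "fiq", "error" };
static const char *groups[] = {
	"EL1t (from EL1, SP_EL0)",
	"EL1h (from EL1, SP_EL1)",
	"64-bit EL0",
	"32-bit EL0",
};

int main(void)
{
	for (int g = 0; g < 4; g++)
		for (int t = 0; t < 4; t++)
			printf("VBAR_EL1 + 0x%03x: %-5s %s\n",
			       (g * 4 + t) * 0x80, types[t], groups[g]);
	return 0;
}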
Signed-off-by: Mark Rutland Acked-by: Marc Zyngier Acked-by: Catalin Marinas Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-10-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 42 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index ed7c55d57afeb5..e29d0fb7735892 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -54,7 +54,7 @@ #define BAD_FIQ 2 #define BAD_ERROR 3 - .macro kernel_ventry, el, label, regsize = 64 + .macro kernel_ventry, el:req, regsize:req, label:req .align 7 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 .if \el == 0 @@ -504,31 +504,31 @@ tsk .req x28 // current thread_info .align 11 SYM_CODE_START(vectors) - kernel_ventry 1, sync_invalid // Synchronous EL1t - kernel_ventry 1, irq_invalid // IRQ EL1t - kernel_ventry 1, fiq_invalid // FIQ EL1t - kernel_ventry 1, error_invalid // Error EL1t + kernel_ventry 1, 64, sync_invalid // Synchronous EL1t + kernel_ventry 1, 64, irq_invalid // IRQ EL1t + kernel_ventry 1, 64, fiq_invalid // FIQ EL1t + kernel_ventry 1, 64, error_invalid // Error EL1t - kernel_ventry 1, sync // Synchronous EL1h - kernel_ventry 1, irq // IRQ EL1h - kernel_ventry 1, fiq // FIQ EL1h - kernel_ventry 1, error // Error EL1h + kernel_ventry 1, 64, sync // Synchronous EL1h + kernel_ventry 1, 64, irq // IRQ EL1h + kernel_ventry 1, 64, fiq // FIQ EL1h + kernel_ventry 1, 64, error // Error EL1h - kernel_ventry 0, sync // Synchronous 64-bit EL0 - kernel_ventry 0, irq // IRQ 64-bit EL0 - kernel_ventry 0, fiq // FIQ 64-bit EL0 - kernel_ventry 0, error // Error 64-bit EL0 + kernel_ventry 0, 64, sync // Synchronous 64-bit EL0 + kernel_ventry 0, 64, irq // IRQ 64-bit EL0 + kernel_ventry 0, 64, fiq // FIQ 64-bit EL0 + kernel_ventry 0, 64, error // Error 64-bit EL0 #ifdef CONFIG_COMPAT - kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 - kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 - kernel_ventry 0, fiq_compat, 32 // FIQ 32-bit EL0 - kernel_ventry 0, error_compat, 32 // Error 32-bit EL0 + kernel_ventry 0, 32, sync_compat // Synchronous 32-bit EL0 + kernel_ventry 0, 32, irq_compat // IRQ 32-bit EL0 + kernel_ventry 0, 32, fiq_compat // FIQ 32-bit EL0 + kernel_ventry 0, 32, error_compat // Error 32-bit EL0 #else - kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0 - kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0 - kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0 - kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0 + kernel_ventry 0, 32, sync_invalid // Synchronous 32-bit EL0 + kernel_ventry 0, 32, irq_invalid // IRQ 32-bit EL0 + kernel_ventry 0, 32, fiq_invalid // FIQ 32-bit EL0 + kernel_ventry 0, 32, error_invalid // Error 32-bit EL0 #endif SYM_CODE_END(vectors) From af541cbbf9c646d2eaa8b3ee3836d5b16435e848 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:14 +0100 Subject: [PATCH 10/20] arm64: entry: consolidate EL1 exception returns Following the example of ret_to_user, let's consolidate all the EL1 return paths with a ret_to_kernel helper, rather than each entry point having its own copy of the return code. There should be no functional change as a result of this patch. 
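A minimal C sketch of the deduplication pattern, with hypothetical names: every handler funnels into one shared exit routine, so any future change to the exit sequence happens in exactly one place:

#include <stdio.h>

struct regs { unsigned long pc; };

/* One shared exit path, standing in for ret_to_kernel/kernel_exit 1. */
static void ret_to_kernel(struct regs *r)
{
	printf("restore context, return to pc=%#lx\n", r->pc);
}

/* Each handler branches to the shared path instead of inlining it. */
static void el1_sync(struct regs *r)  { /* triage ... */ ret_to_kernel(r); }
static void el1_irq(struct regs *r)   { /* triage ... */ ret_to_kernel(r); }
static void el1_error(struct regs *r) { /* triage ... */ ret_to_kernel(r); }

int main(void)
{
	struct regs r = { .pc = 0x1000 };
	el1_sync(&r);
	el1_irq(&r);
	el1_error(&r);
	return 0;
}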
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-11-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e29d0fb7735892..54986d488983ba 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -611,30 +611,34 @@ SYM_CODE_START_LOCAL(el1_sync) kernel_entry 1 mov x0, sp bl el1_sync_handler - kernel_exit 1 + b ret_to_kernel SYM_CODE_END(el1_sync) SYM_CODE_START_LOCAL(el1_irq) kernel_entry 1 mov x0, sp bl el1_irq_handler - kernel_exit 1 + b ret_to_kernel SYM_CODE_END(el1_irq) SYM_CODE_START_LOCAL(el1_fiq) kernel_entry 1 mov x0, sp bl el1_fiq_handler - kernel_exit 1 + b ret_to_kernel SYM_CODE_END(el1_fiq) SYM_CODE_START_LOCAL(el1_error) kernel_entry 1 mov x0, sp bl el1_error_handler - kernel_exit 1 + b ret_to_kernel SYM_CODE_END(el1_error) +SYM_CODE_START_LOCAL(ret_to_kernel) + kernel_exit 1 +SYM_CODE_END(ret_to_kernel) + /* * EL0 mode handlers. */ From cbed5f8d3feb5ecc84c998b81db7e004b3fb2135 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:15 +0100 Subject: [PATCH 11/20] arm64: entry: move bad_mode() to entry-common.c In subsequent patches we'll rework the way bad_mode() is called by exception entry code. In preparation for this, let's move bad_mode() itself into entry-common.c. Let's also mark it as noinstr (e.g. to prevent it being kprobed), and let's also make the `handler` array a local variable, as this is only used by bad_mode(), and will be removed entirely in a subsequent patch. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-12-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry-common.c | 27 +++++++++++++++++++++++++++ arch/arm64/kernel/traps.c | 25 ------------------------- 2 files changed, 27 insertions(+), 25 deletions(-) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index ae1b6d7c00e1b0..74d09fd3dafafb 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -156,6 +157,32 @@ static void do_interrupt_handler(struct pt_regs *regs, extern void (*handle_arch_irq)(struct pt_regs *); extern void (*handle_arch_fiq)(struct pt_regs *); +/* + * bad_mode handles the impossible case in the exception vector. This is always + * fatal.
+ */ +asmlinkage void noinstr bad_mode(struct pt_regs *regs, int reason, unsigned int esr) +{ + const char *handler[] = { + "Synchronous Abort", + "IRQ", + "FIQ", + "Error" + }; + + arm64_enter_nmi(regs); + + console_verbose(); + + pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", + handler[reason], smp_processor_id(), esr, + esr_get_class_string(esr)); + + __show_regs(regs); + panic("bad mode"); +} + + #ifdef CONFIG_ARM64_ERRATUM_1463225 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 5fd12d19ef4be1..7def18ff02e253 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -45,13 +45,6 @@ #include #include -static const char *handler[] = { - "Synchronous Abort", - "IRQ", - "FIQ", - "Error" -}; - int show_unhandled_signals = 0; static void dump_kernel_instr(const char *lvl, struct pt_regs *regs) @@ -750,24 +743,6 @@ const char *esr_get_class_string(u32 esr) return esr_class_str[ESR_ELx_EC(esr)]; } -/* - * bad_mode handles the impossible case in the exception vector. This is always - * fatal. - */ -asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr) -{ - arm64_enter_nmi(regs); - - console_verbose(); - - pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", - handler[reason], smp_processor_id(), esr, - esr_get_class_string(esr)); - - __show_regs(regs); - panic("bad mode"); -} - /* * bad_el0_sync handles unexpected, but potentially recoverable synchronous * exceptions taken from EL0. Unlike bad_mode, this returns. From ca0c2647f54c34000b4026c6632268d2dc304c67 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:16 +0100 Subject: [PATCH 12/20] arm64: entry: improve bad_mode() Our use of bad_mode() has a few rough edges: * AArch64 doesn't use the term "mode", and refers to "Execution states", "Exception levels", and "Selected stack pointer". * We log the exception type (SYNC/IRQ/FIQ/SError), but not the actual "mode" (though this can be decoded from the SPSR value). * We use bad_mode() as a second-level handler for unexpected synchronous exceptions, where the "mode" is legitimate, but the specific exception is not. * We dump the ESR value, but call this "code", and so it's not clear to all readers that this is the ESR. ... and all of this can be somewhat opaque to those who aren't extremely familiar with the code. Let's make this a bit clearer by having bad_mode() log "Unhandled ${TYPE} exception" rather than "Bad mode in ${TYPE} handler", using "ESR" rather than "code", and having the final panic() log "Unhandled exception" rather than "Bad mode". In future we'd like to log the specific architectural vector rather than just the type of exception, so we also split the core of bad_mode() out into a helper called __panic_unhandled(), which takes the vector as a string argument. 
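A sketch of the resulting report shape, using stand-ins for the kernel helpers (esr_class_str() here plays the role of esr_get_class_string(), and the ESR value passed in main() is arbitrary):

#include <stdio.h>
#include <stdlib.h>

/* Stub for esr_get_class_string(); the kernel decodes ESR_ELx.EC here. */
static const char *esr_class_str(unsigned int esr)
{
	return "UNRECOGNIZED EC";
}

/* Shape of the new report: vector name, CPU, raw ESR, decoded class. */
static void panic_unhandled(const char *vector, int cpu, unsigned int esr)
{
	printf("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
	       vector, cpu, esr, esr_class_str(esr));
	exit(1);
}

int main(void)
{
	panic_unhandled("64-bit el1t sync", 0, 0x02000000 /* arbitrary */);
}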
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-13-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry-common.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 74d09fd3dafafb..d0f9a639406756 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -157,31 +157,32 @@ static void do_interrupt_handler(struct pt_regs *regs, extern void (*handle_arch_irq)(struct pt_regs *); extern void (*handle_arch_fiq)(struct pt_regs *); -/* - * bad_mode handles the impossible case in the exception vector. This is always - * fatal. - */ -asmlinkage void noinstr bad_mode(struct pt_regs *regs, int reason, unsigned int esr) +static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector, + unsigned int esr) { - const char *handler[] = { - "Synchronous Abort", - "IRQ", - "FIQ", - "Error" - }; - arm64_enter_nmi(regs); console_verbose(); - pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", - handler[reason], smp_processor_id(), esr, + pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n", + vector, smp_processor_id(), esr, esr_get_class_string(esr)); __show_regs(regs); - panic("bad mode"); + panic("Unhandled exception"); } +asmlinkage void noinstr bad_mode(struct pt_regs *regs, int reason, unsigned int esr) +{ + const char *handler[] = { + "Synchronous Abort", + "IRQ", + "FIQ", + "Error" + }; + + __panic_unhandled(regs, handler[reason], esr); +} #ifdef CONFIG_ARM64_ERRATUM_1463225 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); From a5b43a87a7609d49ed4a453a2b99b6d36ab1e5d0 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:17 +0100 Subject: [PATCH 13/20] arm64: entry: template the entry asm functions Now that the majority of the exception triage logic has been converted to C, the entry assembly functions all have a uniform structure. Let's generate them all with an assembly macro to reduce the amount of code and to ensure they all remain in sync if we make changes in future. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-14-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry.S | 113 +++++++++----------------------------- 1 file changed, 27 insertions(+), 86 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 54986d488983ba..b719ac26f7d13b 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -604,102 +604,43 @@ SYM_CODE_START_LOCAL(el1_error_invalid) inv_entry 1, BAD_ERROR SYM_CODE_END(el1_error_invalid) -/* - * EL1 mode handlers. 
- */ -SYM_CODE_START_LOCAL(el1_sync) - kernel_entry 1 - mov x0, sp - bl el1_sync_handler - b ret_to_kernel -SYM_CODE_END(el1_sync) - -SYM_CODE_START_LOCAL(el1_irq) - kernel_entry 1 - mov x0, sp - bl el1_irq_handler - b ret_to_kernel -SYM_CODE_END(el1_irq) - -SYM_CODE_START_LOCAL(el1_fiq) - kernel_entry 1 - mov x0, sp - bl el1_fiq_handler - b ret_to_kernel -SYM_CODE_END(el1_fiq) - -SYM_CODE_START_LOCAL(el1_error) - kernel_entry 1 + .macro entry_handler el:req, regsize:req, label:req +SYM_CODE_START_LOCAL(el\el\()_\label) + kernel_entry \el, \regsize mov x0, sp - bl el1_error_handler + bl el\el\()_\label\()_handler + .if \el == 0 + b ret_to_user + .else b ret_to_kernel -SYM_CODE_END(el1_error) - -SYM_CODE_START_LOCAL(ret_to_kernel) - kernel_exit 1 -SYM_CODE_END(ret_to_kernel) + .endif +SYM_CODE_END(el\el\()_\label) + .endm /* - * EL0 mode handlers. + * Early exception handlers */ -SYM_CODE_START_LOCAL(el0_sync) - kernel_entry 0 - mov x0, sp - bl el0_sync_handler - b ret_to_user -SYM_CODE_END(el0_sync) + entry_handler 1, 64, sync + entry_handler 1, 64, irq + entry_handler 1, 64, fiq + entry_handler 1, 64, error -SYM_CODE_START_LOCAL(el0_irq) - kernel_entry 0 - mov x0, sp - bl el0_irq_handler - b ret_to_user -SYM_CODE_END(el0_irq) - -SYM_CODE_START_LOCAL(el0_fiq) - kernel_entry 0 - mov x0, sp - bl el0_fiq_handler - b ret_to_user -SYM_CODE_END(el0_fiq) - -SYM_CODE_START_LOCAL(el0_error) - kernel_entry 0 - mov x0, sp - bl el0_error_handler - b ret_to_user -SYM_CODE_END(el0_error) + entry_handler 0, 64, sync + entry_handler 0, 64, irq + entry_handler 0, 64, fiq + entry_handler 0, 64, error #ifdef CONFIG_COMPAT -SYM_CODE_START_LOCAL(el0_sync_compat) - kernel_entry 0, 32 - mov x0, sp - bl el0_sync_compat_handler - b ret_to_user -SYM_CODE_END(el0_sync_compat) - -SYM_CODE_START_LOCAL(el0_irq_compat) - kernel_entry 0, 32 - mov x0, sp - bl el0_irq_compat_handler - b ret_to_user -SYM_CODE_END(el0_irq_compat) - -SYM_CODE_START_LOCAL(el0_fiq_compat) - kernel_entry 0, 32 - mov x0, sp - bl el0_fiq_compat_handler - b ret_to_user -SYM_CODE_END(el0_fiq_compat) - -SYM_CODE_START_LOCAL(el0_error_compat) - kernel_entry 0, 32 - mov x0, sp - bl el0_error_compat_handler - b ret_to_user -SYM_CODE_END(el0_error_compat) + entry_handler 0, 32, sync_compat + entry_handler 0, 32, irq_compat + entry_handler 0, 32, fiq_compat + entry_handler 0, 32, error_compat #endif +SYM_CODE_START_LOCAL(ret_to_kernel) + kernel_exit 1 +SYM_CODE_END(ret_to_kernel) + /* * "slow" syscall return path. */ From ec841aab8d3cdd23decdcf0c47292e14627446c1 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:18 +0100 Subject: [PATCH 14/20] arm64: entry: handle all vectors with C We have 16 architectural exception vectors, and depending on kernel configuration we handle 8 or 12 of these with C code, with the remaining 8 or 4 of these handled as special cases in the entry assembly. It would be nicer if the entry assembly were uniform for all exceptions, and we deferred any specific handling of the exceptions to C code. This way the entry assembly can be more easily templated without ifdeffery or special cases, and it's easier to modify the handling of these cases in future (e.g. to dump additional registers or other context). This patch reworks the entry code so that we always have a C handler for every architectural exception vector, with the entry assembly being completely uniform. We now have to handle exceptions from EL1t and EL1h, and also have to handle exceptions from AArch32 even when the kernel is built without CONFIG_COMPAT.
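The stub generation described in the remainder of this message (and visible as the UNHANDLED() macro in the diff) relies only on standard token pasting and stringizing; a standalone C model:

#include <stdio.h>

/*
 * Standalone model of the patch's UNHANDLED() macro: ## pastes tokens
 * into a function name, # stringizes them for the message.
 */
#define UNHANDLED(el, regsize, vector)                              \
static void el##_##regsize##_##vector##_handler(void)              \
{                                                                   \
	printf("Unhandled " #regsize "-bit " #el " " #vector        \
	       " exception\n");                                     \
}

UNHANDLED(el1t, 64, sync) /* defines el1t_64_sync_handler() */
UNHANDLED(el0t, 32, fiq)  /* defines el0t_32_fiq_handler() */

int main(void)
{
	el1t_64_sync_handler();
	el0t_32_fiq_handler();
	return 0;
}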
To make this clear and to simplify templating, we rename the top-level exception handlers with a consistent naming scheme: asm: <el+sp>_<regsize>_<type> c: <el+sp>_<regsize>_<type>_handler .. where: <el+sp> is `el1t`, `el1h`, or `el0t` <regsize> is `64` or `32` <type> is `sync`, `irq`, `fiq`, or `error` ... e.g. asm: el1h_64_sync c: el1h_64_sync_handler ... with lower-level handlers simply using "el1" and "compat" as today. For unexpected exceptions, this information is passed to __panic_unhandled(), so it can report the specific vector an unexpected exception was taken from, e.g. | Unhandled 64-bit el1t sync exception For vectors we never expect to enter legitimately, the C code is generated using a macro to avoid code duplication. The exceptions are handled via __panic_unhandled(), replacing bad_mode() (which is removed). The `kernel_ventry` and `entry_handler` assembly macros are updated to handle the new naming scheme. In theory it should be possible to generate the entry functions at the same time as the vectors using a single table, but this will require reworking the linker script to split the two into separate sections, so for now we have separate tables. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-15-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 32 ++++--- arch/arm64/kernel/entry-common.c | 51 +++++----- arch/arm64/kernel/entry.S | 146 +++++++++-------------------- arch/arm64/kernel/traps.c | 2 +- 4 files changed, 93 insertions(+), 138 deletions(-) diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 4284ee57a9a597..ad30a5a1d2bfa0 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -31,18 +31,25 @@ static inline u32 disr_to_esr(u64 disr) return esr; } -asmlinkage void el1_sync_handler(struct pt_regs *regs); -asmlinkage void el1_irq_handler(struct pt_regs *regs); -asmlinkage void el1_fiq_handler(struct pt_regs *regs); -asmlinkage void el1_error_handler(struct pt_regs *regs); -asmlinkage void el0_sync_handler(struct pt_regs *regs); -asmlinkage void el0_irq_handler(struct pt_regs *regs); -asmlinkage void el0_fiq_handler(struct pt_regs *regs); -asmlinkage void el0_error_handler(struct pt_regs *regs); -asmlinkage void el0_sync_compat_handler(struct pt_regs *regs); -asmlinkage void el0_irq_compat_handler(struct pt_regs *regs); -asmlinkage void el0_fiq_compat_handler(struct pt_regs *regs); -asmlinkage void el0_error_compat_handler(struct pt_regs *regs); +asmlinkage void el1t_64_sync_handler(struct pt_regs *regs); +asmlinkage void el1t_64_irq_handler(struct pt_regs *regs); +asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs); +asmlinkage void el1t_64_error_handler(struct pt_regs *regs); + +asmlinkage void el1h_64_sync_handler(struct pt_regs *regs); +asmlinkage void el1h_64_irq_handler(struct pt_regs *regs); +asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs); +asmlinkage void el1h_64_error_handler(struct pt_regs *regs); + +asmlinkage void el0t_64_sync_handler(struct pt_regs *regs); +asmlinkage void el0t_64_irq_handler(struct pt_regs *regs); +asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs); +asmlinkage void el0t_64_error_handler(struct pt_regs *regs); + +asmlinkage void el0t_32_sync_handler(struct pt_regs *regs); +asmlinkage void el0t_32_irq_handler(struct pt_regs *regs); +asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs); +asmlinkage void el0t_32_error_handler(struct
pt_regs *regs); asmlinkage void call_on_irq_stack(struct pt_regs *regs, void (*func)(struct pt_regs *)); @@ -53,7 +60,6 @@ void arm64_exit_nmi(struct pt_regs *regs); void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs); void do_undefinstr(struct pt_regs *regs); void do_bti(struct pt_regs *regs); -asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr); void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, struct pt_regs *regs); void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs); diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index d0f9a639406756..dd6403b748f2c1 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -172,16 +172,11 @@ static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector, panic("Unhandled exception"); } -asmlinkage void noinstr bad_mode(struct pt_regs *regs, int reason, unsigned int esr) -{ - const char *handler[] = { - "Synchronous Abort", - "IRQ", - "FIQ", - "Error" - }; - - __panic_unhandled(regs, handler[reason], esr); +#define UNHANDLED(el, regsize, vector) \ +asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs) \ +{ \ + const char *desc = #regsize "-bit " #el " " #vector; \ + __panic_unhandled(regs, desc, read_sysreg(esr_el1)); \ } #ifdef CONFIG_ARM64_ERRATUM_1463225 @@ -233,6 +228,11 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) } #endif /* CONFIG_ARM64_ERRATUM_1463225 */ +UNHANDLED(el1t, 64, sync) +UNHANDLED(el1t, 64, irq) +UNHANDLED(el1t, 64, fiq) +UNHANDLED(el1t, 64, error) + static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr) { unsigned long far = read_sysreg(far_el1); @@ -268,7 +268,7 @@ static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr) { enter_from_kernel_mode(regs); local_daif_inherit(regs); - bad_mode(regs, 0, esr); + __panic_unhandled(regs, "64-bit el1h sync", esr); local_daif_mask(); exit_to_kernel_mode(regs); } @@ -316,7 +316,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr) exit_to_kernel_mode(regs); } -asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs) +asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -370,17 +370,17 @@ static void noinstr el1_interrupt(struct pt_regs *regs, exit_el1_irq_or_nmi(regs); } -asmlinkage void noinstr el1_irq_handler(struct pt_regs *regs) +asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs) { el1_interrupt(regs, handle_arch_irq); } -asmlinkage void noinstr el1_fiq_handler(struct pt_regs *regs) +asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs) { el1_interrupt(regs, handle_arch_fiq); } -asmlinkage void noinstr el1_error_handler(struct pt_regs *regs) +asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -526,7 +526,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr) do_ptrauth_fault(regs, esr); } -asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -597,7 +597,7 @@ static void noinstr __el0_irq_handler_common(struct pt_regs *regs) el0_interrupt(regs, handle_arch_irq); } -asmlinkage void noinstr el0_irq_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs) { __el0_irq_handler_common(regs); } 
@@ -607,7 +607,7 @@ static void noinstr __el0_fiq_handler_common(struct pt_regs *regs) el0_interrupt(regs, handle_arch_fiq); } -asmlinkage void noinstr el0_fiq_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs) { __el0_fiq_handler_common(regs); } @@ -624,7 +624,7 @@ static void __el0_error_handler_common(struct pt_regs *regs) local_daif_restore(DAIF_PROCCTX); } -asmlinkage void noinstr el0_error_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs) { __el0_error_handler_common(regs); } @@ -644,7 +644,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs) do_el0_svc_compat(regs); } -asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -688,18 +688,23 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs) } } -asmlinkage void noinstr el0_irq_compat_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs) { __el0_irq_handler_common(regs); } -asmlinkage void noinstr el0_fiq_compat_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs) { __el0_fiq_handler_common(regs); } -asmlinkage void noinstr el0_error_compat_handler(struct pt_regs *regs) +asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs) { __el0_error_handler_common(regs); } +#else /* CONFIG_COMPAT */ +UNHANDLED(el0t, 32, sync) +UNHANDLED(el0t, 32, irq) +UNHANDLED(el0t, 32, fiq) +UNHANDLED(el0t, 32, error) #endif /* CONFIG_COMPAT */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index b719ac26f7d13b..d43a12dfd189ef 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -45,16 +45,7 @@ .endr .endm -/* - * Bad Abort numbers - *----------------- - */ -#define BAD_SYNC 0 -#define BAD_IRQ 1 -#define BAD_FIQ 2 -#define BAD_ERROR 3 - - .macro kernel_ventry, el:req, regsize:req, label:req + .macro kernel_ventry, el:req, ht:req, regsize:req, label:req .align 7 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 .if \el == 0 @@ -81,7 +72,7 @@ alternative_else_nop_endif tbnz x0, #THREAD_SHIFT, 0f sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp - b el\()\el\()_\label + b el\el\ht\()_\regsize\()_\label 0: /* @@ -113,7 +104,7 @@ alternative_else_nop_endif sub sp, sp, x0 mrs x0, tpidrro_el0 #endif - b el\()\el\()_\label + b el\el\ht\()_\regsize\()_\label .endm .macro tramp_alias, dst, sym @@ -504,32 +495,25 @@ tsk .req x28 // current thread_info .align 11 SYM_CODE_START(vectors) - kernel_ventry 1, 64, sync_invalid // Synchronous EL1t - kernel_ventry 1, 64, irq_invalid // IRQ EL1t - kernel_ventry 1, 64, fiq_invalid // FIQ EL1t - kernel_ventry 1, 64, error_invalid // Error EL1t - - kernel_ventry 1, 64, sync // Synchronous EL1h - kernel_ventry 1, 64, irq // IRQ EL1h - kernel_ventry 1, 64, fiq // FIQ EL1h - kernel_ventry 1, 64, error // Error EL1h - - kernel_ventry 0, 64, sync // Synchronous 64-bit EL0 - kernel_ventry 0, 64, irq // IRQ 64-bit EL0 - kernel_ventry 0, 64, fiq // FIQ 64-bit EL0 - kernel_ventry 0, 64, error // Error 64-bit EL0 - -#ifdef CONFIG_COMPAT - kernel_ventry 0, 32, sync_compat // Synchronous 32-bit EL0 - kernel_ventry 0, 32, irq_compat // IRQ 32-bit EL0 - kernel_ventry 0, 32, fiq_compat // FIQ 32-bit EL0 - kernel_ventry 0, 32, error_compat // Error 32-bit EL0 -#else - kernel_ventry 0, 32, sync_invalid // 
Synchronous 32-bit EL0 - kernel_ventry 0, 32, irq_invalid // IRQ 32-bit EL0 - kernel_ventry 0, 32, fiq_invalid // FIQ 32-bit EL0 - kernel_ventry 0, 32, error_invalid // Error 32-bit EL0 -#endif + kernel_ventry 1, t, 64, sync // Synchronous EL1t + kernel_ventry 1, t, 64, irq // IRQ EL1t + kernel_ventry 1, t, 64, fiq // FIQ EL1t + kernel_ventry 1, t, 64, error // Error EL1t + + kernel_ventry 1, h, 64, sync // Synchronous EL1h + kernel_ventry 1, h, 64, irq // IRQ EL1h + kernel_ventry 1, h, 64, fiq // FIQ EL1h + kernel_ventry 1, h, 64, error // Error EL1h + + kernel_ventry 0, t, 64, sync // Synchronous 64-bit EL0 + kernel_ventry 0, t, 64, irq // IRQ 64-bit EL0 + kernel_ventry 0, t, 64, fiq // FIQ 64-bit EL0 + kernel_ventry 0, t, 64, error // Error 64-bit EL0 + + kernel_ventry 0, t, 32, sync // Synchronous 32-bit EL0 + kernel_ventry 0, t, 32, irq // IRQ 32-bit EL0 + kernel_ventry 0, t, 32, fiq // FIQ 32-bit EL0 + kernel_ventry 0, t, 32, error // Error 32-bit EL0 SYM_CODE_END(vectors) #ifdef CONFIG_VMAP_STACK @@ -560,82 +544,42 @@ __bad_stack: ASM_BUG() #endif /* CONFIG_VMAP_STACK */ -/* - * Invalid mode handlers - */ - .macro inv_entry, el, reason, regsize = 64 - kernel_entry \el, \regsize - mov x0, sp - mov x1, #\reason - mrs x2, esr_el1 - bl bad_mode - ASM_BUG() - .endm -SYM_CODE_START_LOCAL(el0_sync_invalid) - inv_entry 0, BAD_SYNC -SYM_CODE_END(el0_sync_invalid) - -SYM_CODE_START_LOCAL(el0_irq_invalid) - inv_entry 0, BAD_IRQ -SYM_CODE_END(el0_irq_invalid) - -SYM_CODE_START_LOCAL(el0_fiq_invalid) - inv_entry 0, BAD_FIQ -SYM_CODE_END(el0_fiq_invalid) - -SYM_CODE_START_LOCAL(el0_error_invalid) - inv_entry 0, BAD_ERROR -SYM_CODE_END(el0_error_invalid) -SYM_CODE_START_LOCAL(el1_sync_invalid) - inv_entry 1, BAD_SYNC -SYM_CODE_END(el1_sync_invalid) - -SYM_CODE_START_LOCAL(el1_irq_invalid) - inv_entry 1, BAD_IRQ -SYM_CODE_END(el1_irq_invalid) - -SYM_CODE_START_LOCAL(el1_fiq_invalid) - inv_entry 1, BAD_FIQ -SYM_CODE_END(el1_fiq_invalid) - -SYM_CODE_START_LOCAL(el1_error_invalid) - inv_entry 1, BAD_ERROR -SYM_CODE_END(el1_error_invalid) - - .macro entry_handler el:req, regsize:req, label:req -SYM_CODE_START_LOCAL(el\el\()_\label) + .macro entry_handler el:req, ht:req, regsize:req, label:req +SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label) kernel_entry \el, \regsize mov x0, sp - bl el\el\()_\label\()_handler + bl el\el\ht\()_\regsize\()_\label\()_handler .if \el == 0 b ret_to_user .else b ret_to_kernel .endif -SYM_CODE_END(el\el\()_\label) +SYM_CODE_END(el\el\ht\()_\regsize\()_\label) .endm /* * Early exception handlers */ - entry_handler 1, 64, sync - entry_handler 1, 64, irq - entry_handler 1, 64, fiq - entry_handler 1, 64, error - - entry_handler 0, 64, sync - entry_handler 0, 64, irq - entry_handler 0, 64, fiq - entry_handler 0, 64, error - -#ifdef CONFIG_COMPAT - entry_handler 0, 32, sync_compat - entry_handler 0, 32, irq_compat - entry_handler 0, 32, fiq_compat - entry_handler 0, 32, error_compat -#endif + entry_handler 1, t, 64, sync + entry_handler 1, t, 64, irq + entry_handler 1, t, 64, fiq + entry_handler 1, t, 64, error + + entry_handler 1, h, 64, sync + entry_handler 1, h, 64, irq + entry_handler 1, h, 64, fiq + entry_handler 1, h, 64, error + + entry_handler 0, t, 64, sync + entry_handler 0, t, 64, irq + entry_handler 0, t, 64, fiq + entry_handler 0, t, 64, error + + entry_handler 0, t, 32, sync + entry_handler 0, t, 32, irq + entry_handler 0, t, 32, fiq + entry_handler 0, t, 32, error SYM_CODE_START_LOCAL(ret_to_kernel) kernel_exit 1 diff --git a/arch/arm64/kernel/traps.c
b/arch/arm64/kernel/traps.c index 7def18ff02e253..47d423f7ac81f6 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -745,7 +745,7 @@ const char *esr_get_class_string(u32 esr) /* * bad_el0_sync handles unexpected, but potentially recoverable synchronous - * exceptions taken from EL0. Unlike bad_mode, this returns. + * exceptions taken from EL0. */ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) { From afd05e28c9115d01f01d934962634789d069d3fe Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:19 +0100 Subject: [PATCH 15/20] arm64: entry: fold el1_inv() into el1h_64_sync_handler() An unexpected synchronous exception from EL1h could happen at any time, and for robustness we should treat this as an NMI, making minimal assumptions about the context the exception was taken from. Currently el1_inv() assumes we can use enter_from_kernel_mode(), and also assumes that we should inherit the original DAIF value. Neither of these is desirable when we take an unexpected exception. Further, after el1_inv() calls __panic_unhandled(), the remainder of the function is unreachable, and therefore superfluous. Let's address this and simplify things by having el1h_64_sync_handler() call __panic_unhandled() directly, without any of the redundant logic. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reported-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-16-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/entry-common.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index dd6403b748f2c1..ce5c8af91d311f 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -264,15 +264,6 @@ static void noinstr el1_undef(struct pt_regs *regs) exit_to_kernel_mode(regs); } -static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr) -{ - enter_from_kernel_mode(regs); - local_daif_inherit(regs); - __panic_unhandled(regs, "64-bit el1h sync", esr); - local_daif_mask(); - exit_to_kernel_mode(regs); -} - static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs) { regs->lockdep_hardirqs = lockdep_hardirqs_enabled(); @@ -346,7 +337,7 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs) el1_fpac(regs, esr); break; default: - el1_inv(regs, esr); + __panic_unhandled(regs, "64-bit el1h sync", esr); } } From 8168f098867f6584295ea408c683f61e945c6ff1 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:20 +0100 Subject: [PATCH 16/20] arm64: entry: split bad stack entry We'd like to keep all the entry sequencing in entry-common.c, as this will allow us to ensure this is consistent, and free from any unsound instrumentation. Currently handle_bad_stack() performs the NMI entry sequence in traps.c. Let's split the low-level entry sequence from the reporting, moving the former to entry-common.c and keeping the latter in traps.c. To make it clear that the reporting function never returns, it is renamed to panic_bad_stack().
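As a rough sketch of the end state (the literal hunks follow below), the entry side reduces to the syndrome register reads plus NMI entry, with all reporting confined to traps.c; the __noreturn annotation shown here is hypothetical, added only to make the never-returns contract explicit, and is not part of this patch:

	/* entry-common.c: low-level entry sequencing only */
	asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
	{
		unsigned int esr = read_sysreg(esr_el1);
		unsigned long far = read_sysreg(far_el1);

		arm64_enter_nmi(regs);
		panic_bad_stack(regs, esr, far);	/* does not return */
	}

	/* traps.c: reporting only (hypothetical __noreturn, for illustration) */
	void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned int esr,
					unsigned long far);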
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-17-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 4 ++++ arch/arm64/kernel/entry-common.c | 11 +++++++++++ arch/arm64/kernel/traps.c | 6 +----- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index ad30a5a1d2bfa0..0113b9242b67e7 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -31,6 +31,8 @@ static inline u32 disr_to_esr(u64 disr) return esr; } +asmlinkage void handle_bad_stack(struct pt_regs *regs); + asmlinkage void el1t_64_sync_handler(struct pt_regs *regs); asmlinkage void el1t_64_irq_handler(struct pt_regs *regs); asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs); @@ -73,4 +75,6 @@ void do_el0_svc(struct pt_regs *regs); void do_el0_svc_compat(struct pt_regs *regs); void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr); void do_serror(struct pt_regs *regs, unsigned int esr); + +void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far); #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index ce5c8af91d311f..efe95edf10c0ce 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -699,3 +699,14 @@ UNHANDLED(el0t, 32, irq) UNHANDLED(el0t, 32, fiq) UNHANDLED(el0t, 32, error) #endif /* CONFIG_COMPAT */ + +#ifdef CONFIG_VMAP_STACK +asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs) +{ + unsigned int esr = read_sysreg(esr_el1); + unsigned long far = read_sysreg(far_el1); + + arm64_enter_nmi(regs); + panic_bad_stack(regs, esr, far); +} +#endif /* CONFIG_VMAP_STACK */ diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 47d423f7ac81f6..af941996eb5f8d 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -763,15 +763,11 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack) __aligned(16); -asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs) +void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far) { unsigned long tsk_stk = (unsigned long)current->stack; unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr); unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack); - unsigned int esr = read_sysreg(esr_el1); - unsigned long far = read_sysreg(far_el1); - - arm64_enter_nmi(regs); console_verbose(); pr_emerg("Insufficient stack space to handle exception!"); From d60b228fd19985a903b8e8c599be0538a875d505 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:21 +0100 Subject: [PATCH 17/20] arm64: entry: split SDEI entry We'd like to keep all the entry sequencing in entry-common.c, as this will allow us to ensure this is consistent, and free from any unsound instrumentation. Currently __sdei_handler() performs the NMI entry/exit sequences in sdei.c. Let's split the low-level entry sequence from the event handling, moving the former to entry-common.c and keeping the latter in sdei.c. The event handling function is renamed to do_sdei_event(), matching the do_${FOO}() pattern used for other exception handlers. 
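Condensed, the resulting shape is as follows (a sketch only; the PSTATE.PAN fixup that precedes the NMI entry is elided here and appears in full in the diff below):

	/* entry-common.c: NMI entry/exit sequencing */
	asmlinkage noinstr unsigned long
	__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
	{
		unsigned long ret;

		arm64_enter_nmi(regs);
		ret = do_sdei_event(regs, arg);	/* event handling stays in sdei.c */
		arm64_exit_nmi(regs);

		return ret;
	}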
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-18-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/sdei.h | 3 ++ arch/arm64/kernel/entry-common.c | 37 ++++++++++++++++++++++++ arch/arm64/kernel/sdei.c | 48 ++------------------------------ 3 files changed, 43 insertions(+), 45 deletions(-) diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h index 63e0b92a5fbb06..03d619a49d4a49 100644 --- a/arch/arm64/include/asm/sdei.h +++ b/arch/arm64/include/asm/sdei.h @@ -37,6 +37,9 @@ struct sdei_registered_event; asmlinkage unsigned long __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg); +unsigned long do_sdei_event(struct pt_regs *regs, + struct sdei_registered_event *arg); + unsigned long sdei_arch_get_entry_point(int conduit); #define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index efe95edf10c0ce..1b32ca3848f535 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -20,6 +20,7 @@ #include #include #include +#include <asm/sdei.h> #include #include #include @@ -710,3 +711,39 @@ asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs) panic_bad_stack(regs, esr, far); } #endif /* CONFIG_VMAP_STACK */ + +#ifdef CONFIG_ARM_SDE_INTERFACE +asmlinkage noinstr unsigned long +__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) +{ + unsigned long ret; + + /* + * We didn't take an exception to get here, so the HW hasn't + * set/cleared bits in PSTATE that we may rely on. + * + * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to + * whether PSTATE bits are inherited unchanged or generated from + * scratch, and the TF-A implementation always clears PAN and always + * clears UAO. There are no other known implementations. + * + * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how + * PSTATE is modified upon architectural exceptions, and so PAN is + * either inherited or set per SCTLR_ELx.SPAN, and UAO is always + * cleared. + * + * We must explicitly reset PAN to the expected state, including + * clearing it when the host isn't using it, in case a VM had it set. + */ + if (system_uses_hw_pan()) + set_pstate_pan(1); + else if (cpu_has_pan()) + set_pstate_pan(0); + + arm64_enter_nmi(regs); + ret = do_sdei_event(regs, arg); + arm64_exit_nmi(regs); + + return ret; +} +#endif /* CONFIG_ARM_SDE_INTERFACE */ diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 2c7ca449dd5111..e7295399274333 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -231,13 +231,13 @@ unsigned long sdei_arch_get_entry_point(int conduit) } /* - * __sdei_handler() returns one of: + * do_sdei_event() returns one of: * SDEI_EV_HANDLED - success, return to the interrupted context. * SDEI_EV_FAILED - failure, return this error code to firmware. * virtual-address - success, return to this address.
*/ -static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, - struct sdei_registered_event *arg) +unsigned long __kprobes do_sdei_event(struct pt_regs *regs, + struct sdei_registered_event *arg) { u32 mode; int i, err = 0; @@ -292,45 +292,3 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, return vbar + 0x480; } - -static void __kprobes notrace __sdei_pstate_entry(void) -{ - /* - * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to - * whether PSTATE bits are inherited unchanged or generated from - * scratch, and the TF-A implementation always clears PAN and always - * clears UAO. There are no other known implementations. - * - * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how - * PSTATE is modified upon architectural exceptions, and so PAN is - * either inherited or set per SCTLR_ELx.SPAN, and UAO is always - * cleared. - * - * We must explicitly reset PAN to the expected state, including - * clearing it when the host isn't using it, in case a VM had it set. - */ - if (system_uses_hw_pan()) - set_pstate_pan(1); - else if (cpu_has_pan()) - set_pstate_pan(0); -} - -asmlinkage noinstr unsigned long -__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) -{ - unsigned long ret; - - /* - * We didn't take an exception to get here, so the HW hasn't - * set/cleared bits in PSTATE that we may rely on. Initialize PAN. - */ - __sdei_pstate_entry(); - - arm64_enter_nmi(regs); - - ret = _sdei_handler(regs, arg); - - arm64_exit_nmi(regs); - - return ret; -} From 6ecbc78c3d06a3e7a4676f348a52f1c533d88464 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:22 +0100 Subject: [PATCH 18/20] arm64: entry: make NMI entry/exit functions static Now that we only call arm64_enter_nmi() and arm64_exit_nmi() from within entry-common.c, let's make these static to ensure this remains the case. 
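That is, both helpers become private to entry-common.c (bodies unchanged, elided here):

	static void noinstr arm64_enter_nmi(struct pt_regs *regs) { /* ... */ }
	static void noinstr arm64_exit_nmi(struct pt_regs *regs) { /* ... */ }

With the extern declarations gone from <asm/exception.h>, any new out-of-file caller fails to build, which is the point: NMI sequencing must go through entry-common.c.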
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-19-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 2 -- arch/arm64/kernel/entry-common.c | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 0113b9242b67e7..4afbc45b8bb0e7 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -57,8 +57,6 @@ asmlinkage void call_on_irq_stack(struct pt_regs *regs, void (*func)(struct pt_regs *)); asmlinkage void enter_from_user_mode(void); asmlinkage void exit_to_user_mode(void); -void arm64_enter_nmi(struct pt_regs *regs); -void arm64_exit_nmi(struct pt_regs *regs); void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs); void do_undefinstr(struct pt_regs *regs); void do_bti(struct pt_regs *regs); diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 1b32ca3848f535..12ce14a98b7c4f 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -75,7 +75,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs) } } -void noinstr arm64_enter_nmi(struct pt_regs *regs) +static void noinstr arm64_enter_nmi(struct pt_regs *regs) { regs->lockdep_hardirqs = lockdep_hardirqs_enabled(); @@ -88,7 +88,7 @@ void noinstr arm64_enter_nmi(struct pt_regs *regs) -void noinstr arm64_exit_nmi(struct pt_regs *regs) +static void noinstr arm64_exit_nmi(struct pt_regs *regs) { bool restore = regs->lockdep_hardirqs; From bf6fa2c0dda751863c3446aa64d733013bec4a19 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:23 +0100 Subject: [PATCH 19/20] arm64: entry: don't instrument entry code with KCOV The code in entry-common.c runs at exception entry and return boundaries, where portions of the kernel environment aren't available. For example, RCU may not be watching, and lockdep state may be out-of-sync with the hardware. Due to this, it is not sound to instrument this code. We generally avoid instrumentation by marking the entry functions as `noinstr`, but currently this doesn't inhibit KCOV instrumentation. Prevent this by disabling KCOV for the entire compilation unit. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-20-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 6cc97730790e71..787c3c83edd7a4 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -14,6 +14,11 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong CFLAGS_syscall.o += -fno-stack-protector +# It's not safe to invoke KCOV when portions of the kernel environment aren't +# available or are out-of-sync with HW state. Since `noinstr` doesn't always +# inhibit KCOV instrumentation, disable it for the entire compilation unit. +KCOV_INSTRUMENT_entry-common.o := n + # Object file lists. 
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-common.o entry-fpsimd.o process.o ptrace.o \ From b5df5b8307b1db6d168ffac29eff3974779bb34b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:24 +0100 Subject: [PATCH 20/20] arm64: idle: don't instrument idle code with KCOV The low-level idle code in arch_cpu_idle() and its callees runs at a time when portions of the kernel environment aren't available. For example, RCU may not be watching, and lockdep state may be out-of-sync with the hardware. Due to this, it is not sound to instrument this code. We generally avoid instrumentation by marking such functions as `noinstr`, but currently this doesn't inhibit KCOV instrumentation. Prevent this by factoring these functions into a new idle.c so that we can disable KCOV for the entire compilation unit, as is done for the core idle code in kernel/sched/idle.c. We'd like to keep instrumentation of the rest of process.c, and for the existing code in cpuidle.c, so a new compilation unit is preferable. The arch_cpu_idle_dead() function in process.c is a cpu hotplug function that is safe to instrument, so it is left as-is in process.c. Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-21-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/kernel/Makefile | 3 +- arch/arm64/kernel/idle.c | 69 +++++++++++++++++++++++++++++++++++++ arch/arm64/kernel/process.c | 57 ------------------------------ 3 files changed, 71 insertions(+), 58 deletions(-) create mode 100644 arch/arm64/kernel/idle.c diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 787c3c83edd7a4..de434204d72342 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -18,6 +18,7 @@ CFLAGS_syscall.o += -fno-stack-protector # available or are out-of-sync with HW state. Since `noinstr` doesn't always # inhibit KCOV instrumentation, disable it for the entire compilation unit. KCOV_INSTRUMENT_entry-common.o := n +KCOV_INSTRUMENT_idle.o := n # Object file lists. obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ @@ -27,7 +28,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ smp.o smp_spin_table.o topology.o smccc-call.o \ - syscall.o proton-pack.o idreg-override.o + syscall.o proton-pack.o idreg-override.o idle.o targets += efi-entry.o diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c new file mode 100644 index 00000000000000..45c79204dc40cc --- /dev/null +++ b/arch/arm64/kernel/idle.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Low-level idle sequences + */ + +#include <linux/cpu.h> +#include <linux/irqflags.h> + +#include <asm/barrier.h> +#include <asm/cpuidle.h> +#include <asm/cpufeature.h> +#include <asm/sysreg.h> + +static void noinstr __cpu_do_idle(void) +{ + dsb(sy); + wfi(); +} + +static void noinstr __cpu_do_idle_irqprio(void) +{ + unsigned long pmr; + unsigned long daif_bits; + + daif_bits = read_sysreg(daif); + write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif); + + /* + * Unmask PMR before going idle to make sure interrupts can + * be raised. + */ + pmr = gic_read_pmr(); + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); + + __cpu_do_idle(); + + gic_write_pmr(pmr); + write_sysreg(daif_bits, daif); +} + +/* + * cpu_do_idle() + * + * Idle the processor (wait for interrupt). 
+ * + * If the CPU supports priority masking we must do additional work to + * ensure that interrupts are not masked at the PMR (because the core will + * not wake up if we block the wake up signal in the interrupt controller). + */ +void noinstr cpu_do_idle(void) +{ + if (system_uses_irq_prio_masking()) + __cpu_do_idle_irqprio(); + else + __cpu_do_idle(); +} + +/* + * This is our default idle handler. + */ +void noinstr arch_cpu_idle(void) +{ + /* + * This should do all the clock switching and wait for interrupt + * tricks + */ + cpu_do_idle(); + raw_local_irq_enable(); +} diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 2e733770915520..72c5d80f03fa84 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -73,63 +73,6 @@ EXPORT_SYMBOL_GPL(pm_power_off); void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); -static void noinstr __cpu_do_idle(void) -{ - dsb(sy); - wfi(); -} - -static void noinstr __cpu_do_idle_irqprio(void) -{ - unsigned long pmr; - unsigned long daif_bits; - - daif_bits = read_sysreg(daif); - write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif); - - /* - * Unmask PMR before going idle to make sure interrupts can - * be raised. - */ - pmr = gic_read_pmr(); - gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - - __cpu_do_idle(); - - gic_write_pmr(pmr); - write_sysreg(daif_bits, daif); -} - -/* - * cpu_do_idle() - * - * Idle the processor (wait for interrupt). - * - * If the CPU supports priority masking we must do additional work to - * ensure that interrupts are not masked at the PMR (because the core will - * not wake up if we block the wake up signal in the interrupt controller). - */ -void noinstr cpu_do_idle(void) -{ - if (system_uses_irq_prio_masking()) - __cpu_do_idle_irqprio(); - else - __cpu_do_idle(); -} - -/* - * This is our default idle handler. - */ -void noinstr arch_cpu_idle(void) -{ - /* - * This should do all the clock switching and wait for interrupt - * tricks - */ - cpu_do_idle(); - raw_local_irq_enable(); -} - #ifdef CONFIG_HOTPLUG_CPU void arch_cpu_idle_dead(void) {