Skip to content

Commit ae65411

Browse files
mrutland authored and ctmarinas committed
arm64: entry: Use split preemption logic
The generic irqentry code now provides irqentry_exit_to_kernel_mode_preempt() and irqentry_exit_to_kernel_mode_after_preempt(), which can be used where architectures have different state requirements for involuntary preemption and exception return, as is the case on arm64. Use the new functions on arm64, aligning our exit to kernel mode logic with the style of our exit to user mode logic. This removes the need for the recently-added bodge in arch_irqentry_exit_need_resched(), and allows preemption to occur when returning from any exception taken from kernel mode, which is nicer for RT. In an ideal world, we'd remove arch_irqentry_exit_need_resched(), and fold the conditionality directly into the architecture-specific entry code. That way all the logic necessary to avoid preempting from a pseudo-NMI could be constrained specifically to the EL1 IRQ/FIQ paths, avoiding redundant work for other exceptions, and making the flow a bit clearer. At present it looks like that would require a larger refactoring (e.g. for the PREEMPT_DYNAMIC logic), and so I've left that as-is for now. Signed-off-by: Mark Rutland <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Jinjie Ruan <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vladimir Murzin <[email protected]> Cc: Will Deacon <[email protected]> Reviewed-by: Jinjie Ruan <[email protected]> Acked-by: Peter Zijlstra (Intel) <[email protected]> Signed-off-by: Catalin Marinas <[email protected]>
1 parent a07b7b2 commit ae65411

2 files changed

Lines changed: 12 additions & 21 deletions

File tree

arch/arm64/include/asm/entry-common.h

Lines changed: 8 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -29,19 +29,14 @@ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
2929

3030
static inline bool arch_irqentry_exit_need_resched(void)
3131
{
32-
if (system_uses_irq_prio_masking()) {
33-
/*
34-
* DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
35-
* priority masking is used the GIC irqchip driver will clear DAIF.IF
36-
* using gic_arch_enable_irqs() for normal IRQs. If anything is set in
37-
* DAIF we must have handled an NMI, so skip preemption.
38-
*/
39-
if (read_sysreg(daif))
40-
return false;
41-
} else {
42-
if (read_sysreg(daif) & (PSR_D_BIT | PSR_A_BIT))
43-
return false;
44-
}
32+
/*
33+
* DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
34+
* priority masking is used the GIC irqchip driver will clear DAIF.IF
35+
* using gic_arch_enable_irqs() for normal IRQs. If anything is set in
36+
* DAIF we must have handled an NMI, so skip preemption.
37+
*/
38+
if (system_uses_irq_prio_masking() && read_sysreg(daif))
39+
return false;
4540

4641
/*
4742
* Preempting a task from an IRQ means we leave copies of PSTATE

arch/arm64/kernel/entry-common.c

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -54,8 +54,11 @@ static noinstr irqentry_state_t arm64_enter_from_kernel_mode(struct pt_regs *reg
5454
static void noinstr arm64_exit_to_kernel_mode(struct pt_regs *regs,
5555
irqentry_state_t state)
5656
{
57+
local_irq_disable();
58+
irqentry_exit_to_kernel_mode_preempt(regs, state);
59+
local_daif_mask();
5760
mte_check_tfsr_exit();
58-
irqentry_exit_to_kernel_mode(regs, state);
61+
irqentry_exit_to_kernel_mode_after_preempt(regs, state);
5962
}
6063

6164
/*
@@ -301,7 +304,6 @@ static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
301304
state = arm64_enter_from_kernel_mode(regs);
302305
local_daif_inherit(regs);
303306
do_mem_abort(far, esr, regs);
304-
local_daif_mask();
305307
arm64_exit_to_kernel_mode(regs, state);
306308
}
307309

@@ -313,7 +315,6 @@ static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
313315
state = arm64_enter_from_kernel_mode(regs);
314316
local_daif_inherit(regs);
315317
do_sp_pc_abort(far, esr, regs);
316-
local_daif_mask();
317318
arm64_exit_to_kernel_mode(regs, state);
318319
}
319320

@@ -324,7 +325,6 @@ static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
324325
state = arm64_enter_from_kernel_mode(regs);
325326
local_daif_inherit(regs);
326327
do_el1_undef(regs, esr);
327-
local_daif_mask();
328328
arm64_exit_to_kernel_mode(regs, state);
329329
}
330330

@@ -335,7 +335,6 @@ static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
335335
state = arm64_enter_from_kernel_mode(regs);
336336
local_daif_inherit(regs);
337337
do_el1_bti(regs, esr);
338-
local_daif_mask();
339338
arm64_exit_to_kernel_mode(regs, state);
340339
}
341340

@@ -346,7 +345,6 @@ static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
346345
state = arm64_enter_from_kernel_mode(regs);
347346
local_daif_inherit(regs);
348347
do_el1_gcs(regs, esr);
349-
local_daif_mask();
350348
arm64_exit_to_kernel_mode(regs, state);
351349
}
352350

@@ -357,7 +355,6 @@ static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
357355
state = arm64_enter_from_kernel_mode(regs);
358356
local_daif_inherit(regs);
359357
do_el1_mops(regs, esr);
360-
local_daif_mask();
361358
arm64_exit_to_kernel_mode(regs, state);
362359
}
363360

@@ -423,7 +420,6 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
423420
state = arm64_enter_from_kernel_mode(regs);
424421
local_daif_inherit(regs);
425422
do_el1_fpac(regs, esr);
426-
local_daif_mask();
427423
arm64_exit_to_kernel_mode(regs, state);
428424
}
429425

0 commit comments

Comments
 (0)