
Commit d224a69

James Morse authored and wildea01 committed
arm64: remove irq_count and do_softirq_own_stack()
sysrq_handle_reboot() re-enables interrupts while on the irq stack. The irq_stack implementation wrongly assumed this would only ever happen via the softirq path, allowing it to update irq_count late, in do_softirq_own_stack(). This means that if an irq occurs in sysrq_handle_reboot(), the stack will be corrupted during emergency_restart(), as irq_count wasn't updated.

Lose the optimisation: rather than moving the irq_count add/subtract into irq_stack_entry/irq_stack_exit, remove irq_count entirely and compare sp_el0 (the struct thread_info pointer) with sp & ~(THREAD_SIZE - 1). This tells us whether we are on a task stack; if so, we can safely switch to the irq stack. Finally, remove do_softirq_own_stack(); we don't need it anymore.

Reported-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[will: use get_thread_info macro]
Signed-off-by: Will Deacon <will.deacon@arm.com>
1 parent 66b3923 · commit d224a69

File tree: 3 files changed, +11 −48 lines
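The heart of the change is the new three-instruction check in irq_stack_entry (see the entry.S hunk below). As a rough C-level illustration of the same idea — a sketch only, not code from this commit; the helper name is made up, and THREAD_SIZE / struct thread_info are assumed to come from the usual arm64 kernel headers of this era, where sp_el0 holds the thread_info pointer sitting at the base of the task stack:

/*
 * Hypothetical helper, for illustration only: masking the interrupted
 * sp down to a THREAD_SIZE boundary recovers the base of whichever
 * stack we were running on.  If that equals the current thread_info
 * pointer, the interrupt arrived on the task stack and switching to
 * the irq stack is safe; otherwise we are already on the irq stack
 * and must not switch again.
 */
static bool on_task_stack(unsigned long sp, struct thread_info *ti)
{
	return (sp & ~(THREAD_SIZE - 1)) == (unsigned long)ti;
}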

arch/arm64/include/asm/irq.h
Lines changed: 0 additions & 2 deletions

@@ -11,8 +11,6 @@
 #include <asm-generic/irq.h>
 #include <asm/thread_info.h>
 
-#define __ARCH_HAS_DO_SOFTIRQ
-
 struct pt_regs;
 
 DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
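Dropping __ARCH_HAS_DO_SOFTIRQ means arm64 stops overriding do_softirq_own_stack() and picks up the generic stub, which simply runs softirqs on the current stack. Paraphrased from memory of the include/linux/interrupt.h of that era (not quoted from this commit), the fallback looks roughly like:

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif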

arch/arm64/kernel/entry.S
Lines changed: 10 additions & 9 deletions

@@ -181,19 +181,20 @@ alternative_endif
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
 
-	this_cpu_ptr irq_stack, x25, x26
-
 	/*
-	 * Check the lowest address on irq_stack for the irq_count value,
-	 * incremented by do_softirq_own_stack if we have re-enabled irqs
-	 * while on the irq_stack.
+	 * Compare sp with the current thread_info, if the top
+	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
+	 * should switch to the irq stack.
 	 */
-	ldr	x26, [x25]
-	cbnz	x26, 9998f		// recursive use?
+	and	x25, x19, #~(THREAD_SIZE - 1)
+	cmp	x25, tsk
+	b.ne	9998f
 
-	/* switch to the irq stack */
+	this_cpu_ptr irq_stack, x25, x26
 	mov	x26, #IRQ_STACK_START_SP
 	add	x26, x25, x26
+
+	/* switch to the irq stack */
 	mov	sp, x26
 
 	/*

@@ -405,10 +406,10 @@ el1_irq:
 	bl	trace_hardirqs_off
 #endif
 
+	get_thread_info tsk
 	irq_handler
 
 #ifdef CONFIG_PREEMPT
-	get_thread_info tsk
 	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
 	cbnz	w24, 1f				// preempt count != 0
 	ldr	x0, [tsk, #TI_FLAGS]		// get flags

arch/arm64/kernel/irq.c
Lines changed: 1 addition & 37 deletions

@@ -25,24 +25,14 @@
 #include <linux/irq.h>
 #include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
 #include <linux/irqchip.h>
 #include <linux/seq_file.h>
 
 unsigned long irq_err_count;
 
-/*
- * irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned.
- * irq_stack[0] is used as irq_count, a non-zero value indicates the stack
- * is in use, and el?_irq() shouldn't switch to it. This is used to detect
- * recursive use of the irq_stack, it is lazily updated by
- * do_softirq_own_stack(), which is called on the irq_stack, before
- * re-enabling interrupts to process softirqs.
- */
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
 DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
 
-#define IRQ_COUNT()	(*per_cpu(irq_stack, smp_processor_id()))
-
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	show_ipi_list(p, prec);

@@ -66,29 +56,3 @@ void __init init_IRQ(void)
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
 }
-
-/*
- * do_softirq_own_stack() is called from irq_exit() before __do_softirq()
- * re-enables interrupts, at which point we may re-enter el?_irq(). We
- * increase irq_count here so that el1_irq() knows that it is already on the
- * irq stack.
- *
- * Called with interrupts disabled, so we don't worry about moving cpu, or
- * being interrupted while modifying irq_count.
- *
- * This function doesn't actually switch stack.
- */
-void do_softirq_own_stack(void)
-{
-	int cpu = smp_processor_id();
-
-	WARN_ON_ONCE(!irqs_disabled());
-
-	if (on_irq_stack(current_stack_pointer, cpu)) {
-		IRQ_COUNT()++;
-		__do_softirq();
-		IRQ_COUNT()--;
-	} else {
-		__do_softirq();
-	}
-}
