Commit c5cb83b
genirq/cpuhotplug: Handle managed IRQs on CPU hotplug
If a CPU goes offline, interrupts affine to the CPU are moved away. If the outgoing CPU is the last CPU in the affinity mask, the migration code breaks the affinity and sets it to all online CPUs.

This is a problem for affinity-managed interrupts, as CPU hotplug is often used for power management purposes. If the affinity is broken, the interrupt is no longer affine to the CPUs to which it was allocated.

Affinity spreading makes it possible to lay out multiqueue devices so that each queue is assigned to a single CPU or a group of CPUs. If the last CPU of a queue goes offline, the queue is no longer used, so the interrupt can be shut down gracefully and parked until one of the assigned CPUs comes online again.

Add a graceful shutdown mechanism to the irq affinity-breaking code path: mark the irq as MANAGED_SHUTDOWN and leave the affinity mask unmodified.

In the online path, scan the active interrupts for managed interrupts. If such an interrupt is functional and the newly onlined CPU is part of its affinity mask, restart it if it is marked MANAGED_SHUTDOWN; if it is already started up, try to add the CPU back to the effective affinity mask.

Originally-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20170619235447.273417334@linutronix.de
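For context, the managed interrupts this commit targets are the ones a multiqueue driver allocates with an affinity descriptor, e.g. via PCI_IRQ_AFFINITY. Below is a minimal sketch of such an allocation; the my_dev names are hypothetical and error unwinding is abbreviated:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_dev_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

/*
 * Hypothetical driver init: spread one MSI-X vector per queue across
 * the CPUs. Vectors allocated with PCI_IRQ_AFFINITY are "managed":
 * the irq core owns their affinity masks, which is what the
 * shutdown/restart logic in this commit relies on.
 */
static int my_dev_setup_irqs(struct pci_dev *pdev, unsigned int nr_queues)
{
	int i, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, nr_queues,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		int ret = request_irq(pci_irq_vector(pdev, i),
				      my_dev_handler, 0, "my_dev", pdev);
		if (ret)
			return ret;	/* real code would unwind */
	}
	return 0;
}

With PCI_IRQ_AFFINITY, the core spreads the vectors and marks them managed, so their masks must not be broken by hotplug; that is exactly the case this commit handles.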
1 parent 761ea38 commit c5cb83b

File tree

4 files changed: +56 −0 lines

include/linux/cpuhotplug.h

Lines changed: 1 addition & 0 deletions

@@ -124,6 +124,7 @@ enum cpuhp_state {
 	CPUHP_AP_ONLINE_IDLE,
 	CPUHP_AP_SMPBOOT_THREADS,
 	CPUHP_AP_X86_VDSO_VMA_ONLINE,
+	CPUHP_AP_IRQ_AFFINITY_ONLINE,
 	CPUHP_AP_PERF_ONLINE,
 	CPUHP_AP_PERF_X86_ONLINE,
 	CPUHP_AP_PERF_X86_UNCORE_ONLINE,

include/linux/irq.h

Lines changed: 5 additions & 0 deletions

@@ -500,7 +500,12 @@ extern int irq_set_affinity_locked(struct irq_data *data,
 				   const struct cpumask *cpumask, bool force);
 extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION)
 extern void irq_migrate_all_off_this_cpu(void);
+extern int irq_affinity_online_cpu(unsigned int cpu);
+#else
+# define irq_affinity_online_cpu	NULL
+#endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void irq_move_irq(struct irq_data *data);

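The #else stub is what keeps the hotplug table in kernel/cpu.c (below) buildable when irq migration support is not configured: a NULL startup callback is simply skipped by the hotplug core. A minimal standalone sketch of that pattern, not kernel code:

#include <stdio.h>

/* Callback table entry; NULL means "nothing to do at this step". */
struct step {
	const char *name;
	int (*startup)(unsigned int cpu);
};

static int run_step(const struct step *s, unsigned int cpu)
{
	if (!s->startup)	/* NULL callback: treated as success */
		return 0;
	return s->startup(cpu);
}

static int say_online(unsigned int cpu)
{
	printf("cpu%u online\n", cpu);
	return 0;
}

int main(void)
{
	struct step populated = { "irq/affinity:online", say_online };
	struct step stubbed   = { "irq/affinity:online", NULL };	/* the #else case */

	run_step(&populated, 1);
	return run_step(&stubbed, 1);
}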
kernel/cpu.c

Lines changed: 5 additions & 0 deletions

@@ -1252,6 +1252,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup.single		= smpboot_unpark_threads,
 		.teardown.single	= NULL,
 	},
+	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
+		.name			= "irq/affinity:online",
+		.startup.single		= irq_affinity_online_cpu,
+		.teardown.single	= NULL,
+	},
 	[CPUHP_AP_PERF_ONLINE] = {
 		.name			= "perf:online",
 		.startup.single		= perf_event_init_cpu,

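Note that .teardown.single stays NULL: the offline side is not a symmetric hotplug step but runs from the architecture's CPU-disable path, which ends up in migrate_one_irq() below. Roughly, following the arm/arm64 pattern (an abbreviated sketch, not the literal kernel source):

#include <linux/irq.h>

/*
 * Sketch of the offline side. The hotplug step above needs no
 * teardown callback because the migration runs here, on the dying
 * CPU itself, while it is being taken down.
 */
int __cpu_disable(void)
{
	/* ... mark the CPU offline, tear down arch state ... */

	/* Migrate IRQs away; managed ones are shut down instead. */
	irq_migrate_all_off_this_cpu();
	return 0;
}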
kernel/irq/cpuhotplug.c

Lines changed: 45 additions & 0 deletions

@@ -83,6 +83,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
 		chip->irq_mask(d);
 
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		/*
+		 * If the interrupt is managed, then shut it down and leave
+		 * the affinity untouched.
+		 */
+		if (irqd_affinity_is_managed(d)) {
+			irqd_set_managed_shutdown(d);
+			irq_shutdown(desc);
+			return false;
+		}
 		affinity = cpu_online_mask;
 		brokeaff = true;
 	}
@@ -129,3 +138,39 @@ void irq_migrate_all_off_this_cpu(void)
 		}
 	}
 }
+
+static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
+{
+	struct irq_data *data = irq_desc_get_irq_data(desc);
+	const struct cpumask *affinity = irq_data_get_affinity_mask(data);
+
+	if (!irqd_affinity_is_managed(data) || !desc->action ||
+	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
+		return;
+
+	if (irqd_is_managed_and_shutdown(data))
+		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+	else
+		irq_set_affinity_locked(data, affinity, false);
+}
+
+/**
+ * irq_affinity_online_cpu - Restore affinity for managed interrupts
+ * @cpu:	Upcoming CPU for which interrupts should be restored
+ */
+int irq_affinity_online_cpu(unsigned int cpu)
+{
+	struct irq_desc *desc;
+	unsigned int irq;
+
+	irq_lock_sparse();
+	for_each_active_irq(irq) {
+		desc = irq_to_desc(irq);
+		raw_spin_lock_irq(&desc->lock);
+		irq_restore_affinity_of_irq(desc, cpu);
+		raw_spin_unlock_irq(&desc->lock);
+	}
+	irq_unlock_sparse();
+
+	return 0;
+}

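To observe the new behavior end to end, one can toggle a CPU offline and online through sysfs and watch a managed vector vanish from and return to /proc/interrupts. A hypothetical userspace check, assuming an NVMe device with managed vectors and that cpu3 is the last CPU in one of its affinity masks:

/*
 * Hypothetical check: toggle a CPU offline/online via sysfs and dump
 * /proc/interrupts around it, to watch a managed vector be shut down
 * and restarted. Build with: cc -o irqwatch irqwatch.c
 */
#include <stdio.h>
#include <stdlib.h>

static void write_online(int cpu, const char *val)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		exit(1);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	int cpu = 3;	/* assumed: last CPU in some managed affinity mask */

	system("grep -i nvme /proc/interrupts");	/* assumed device */
	write_online(cpu, "0");
	system("grep -i nvme /proc/interrupts");
	write_online(cpu, "1");
	system("grep -i nvme /proc/interrupts");
	return 0;
}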