Skip to content

Commit 761ea38

Browse files
committed
genirq: Handle managed irqs gracefully in irq_startup()
Affinity managed interrupts should keep their assigned affinity across CPU hotplug. To avoid magic hackery in device drivers, the core code shall manage them transparently and set these interrupts into a managed shutdown state when the last CPU of the assigned affinity mask goes offline. The interrupt will be restarted when one of the CPUs in the assigned affinity mask comes back online. Add the necessary logic to irq_startup(). If an interrupt is requested and started up, the code checks whether it is affinity managed and if so, it checks whether a CPU in the interrupt's affinity mask is online. If not, it puts the interrupt into managed shutdown state. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Jens Axboe <axboe@kernel.dk> Cc: Marc Zyngier <marc.zyngier@arm.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Keith Busch <keith.busch@intel.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Christoph Hellwig <hch@lst.de> Link: http://lkml.kernel.org/r/20170619235447.189851170@linutronix.de
1 parent 4cde9c6 commit 761ea38

File tree

2 files changed

+62
-4
lines changed

2 files changed

+62
-4
lines changed

include/linux/irq.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -346,7 +346,7 @@ static inline bool irqd_is_started(struct irq_data *d)
346346
return __irqd_to_state(d) & IRQD_IRQ_STARTED;
347347
}
348348

349-
static inline bool irqd_is_managed_shutdown(struct irq_data *d)
349+
static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
350350
{
351351
return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
352352
}

kernel/irq/chip.c

Lines changed: 61 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,52 @@ static void irq_state_set_started(struct irq_desc *desc)
195195
irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
196196
}
197197

198+
enum {
199+
IRQ_STARTUP_NORMAL,
200+
IRQ_STARTUP_MANAGED,
201+
IRQ_STARTUP_ABORT,
202+
};
203+
204+
#ifdef CONFIG_SMP
205+
static int
206+
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
207+
{
208+
struct irq_data *d = irq_desc_get_irq_data(desc);
209+
210+
if (!irqd_affinity_is_managed(d))
211+
return IRQ_STARTUP_NORMAL;
212+
213+
irqd_clr_managed_shutdown(d);
214+
215+
if (cpumask_any_and(aff, cpu_online_mask) > nr_cpu_ids) {
216+
/*
217+
* Catch code which fiddles with enable_irq() on a managed
218+
* and potentially shutdown IRQ. Chained interrupt
219+
* installment or irq auto probing should not happen on
220+
* managed irqs either. Emit a warning, break the affinity
221+
* and start it up as a normal interrupt.
222+
*/
223+
if (WARN_ON_ONCE(force))
224+
return IRQ_STARTUP_NORMAL;
225+
/*
226+
* The interrupt was requested, but there is no online CPU
227+
* in it's affinity mask. Put it into managed shutdown
228+
* state and let the cpu hotplug mechanism start it up once
229+
* a CPU in the mask becomes available.
230+
*/
231+
irqd_set_managed_shutdown(d);
232+
return IRQ_STARTUP_ABORT;
233+
}
234+
return IRQ_STARTUP_MANAGED;
235+
}
236+
#else
237+
static int
238+
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
239+
{
240+
return IRQ_STARTUP_NORMAL;
241+
}
242+
#endif
243+
198244
static int __irq_startup(struct irq_desc *desc)
199245
{
200246
struct irq_data *d = irq_desc_get_irq_data(desc);
@@ -214,15 +260,27 @@ static int __irq_startup(struct irq_desc *desc)
214260

215261
int irq_startup(struct irq_desc *desc, bool resend, bool force)
216262
{
263+
struct irq_data *d = irq_desc_get_irq_data(desc);
264+
struct cpumask *aff = irq_data_get_affinity_mask(d);
217265
int ret = 0;
218266

219267
desc->depth = 0;
220268

221-
if (irqd_is_started(&desc->irq_data)) {
269+
if (irqd_is_started(d)) {
222270
irq_enable(desc);
223271
} else {
224-
ret = __irq_startup(desc);
225-
irq_setup_affinity(desc);
272+
switch (__irq_startup_managed(desc, aff, force)) {
273+
case IRQ_STARTUP_NORMAL:
274+
ret = __irq_startup(desc);
275+
irq_setup_affinity(desc);
276+
break;
277+
case IRQ_STARTUP_MANAGED:
278+
ret = __irq_startup(desc);
279+
irq_set_affinity_locked(d, aff, false);
280+
break;
281+
case IRQ_STARTUP_ABORT:
282+
return 0;
283+
}
226284
}
227285
if (resend)
228286
check_irq_resend(desc);

0 commit comments

Comments
 (0)