
Commit e360adb

Peter Zijlstra authored and Ingo Molnar committed
irq_work: Add generic hardirq context callbacks
Provide a mechanism that allows running code in IRQ context. It is most useful for NMI code that needs to interact with the rest of the system -- such as waking up a task to drain buffers.

Perf currently has such a mechanism, so extract that and provide it as a generic feature, independent of perf, so that others may also benefit.

The IRQ context callback is generated through self-IPIs where possible; on architectures like powerpc, the decrementer (the built-in timer facility) is instead set to generate an interrupt immediately. Architectures that have neither facility make do with a callback from the timer tick. These architectures can call irq_work_run() at the tail of any IRQ handlers that might enqueue such work (like the perf IRQ handler) to avoid undue latencies in processing the work.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
[ various fixes ]
Signed-off-by: Huang Ying <ying.huang@intel.com>
LKML-Reference: <1287036094.7768.291.camel@yhuang-dev>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
1 parent 8e5fc1a commit e360adb
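
For orientation, here is a minimal sketch of how a subsystem might use the interface this series introduces -- assuming the init_irq_work()/irq_work_queue() API from include/linux/irq_work.h; the reader_task, wakeup_reader and example_* names are purely illustrative. Work queued from NMI context is run later in hard IRQ context (via self-IPI, the decrementer, or the timer-tick fallback), where it is safe to wake a task:

#include <linux/irq_work.h>
#include <linux/sched.h>

/* Hypothetical consumer: a task that drains buffers filled from NMI context. */
static struct task_struct *reader_task;
static struct irq_work wakeup_work;

/* Callback: invoked in hard IRQ context, where waking a task is allowed. */
static void wakeup_reader(struct irq_work *work)
{
	wake_up_process(reader_task);
}

static void example_init(void)
{
	init_irq_work(&wakeup_work, wakeup_reader);
}

/* Called from an NMI handler: defer the wakeup rather than doing it here. */
static void example_nmi_handler(void)
{
	irq_work_queue(&wakeup_work);
}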

39 files changed: 311 additions, 242 deletions

arch/alpha/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@ config ALPHA
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	help

arch/alpha/include/asm/perf_event.h

Lines changed: 0 additions & 5 deletions
@@ -1,11 +1,6 @@
 #ifndef __ASM_ALPHA_PERF_EVENT_H
 #define __ASM_ALPHA_PERF_EVENT_H
 
-/* Alpha only supports software events through this interface. */
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
 #else

arch/alpha/kernel/time.c

Lines changed: 15 additions & 15 deletions
@@ -41,7 +41,7 @@
 #include <linux/init.h>
 #include <linux/bcd.h>
 #include <linux/profile.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -83,25 +83,25 @@ static struct {
 
 unsigned long est_cycle_freq;
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_perf_event_pending_flag()	__get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()	__get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()	__get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()	__get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
 
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 }
 
-#else /* CONFIG_PERF_EVENTS */
+#else /* CONFIG_IRQ_WORK */
 
-#define test_perf_event_pending()	0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()	0
+#define clear_irq_work_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 
 
 static inline __u32 rpcc(void)
@@ -191,9 +191,9 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 
 	write_sequnlock(&xtime_lock);
 
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}
 
 #ifndef CONFIG_SMP

arch/arm/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -23,6 +23,7 @@ config ARM
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
 	select HAVE_KERNEL_LZMA
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_REGS_AND_STACK_ACCESS_API

arch/arm/include/asm/perf_event.h

Lines changed: 0 additions & 12 deletions
@@ -12,18 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/*
- * NOP: on *most* (read: all supported) ARM platforms, the performance
- * counter interrupts are regular interrupts and not an NMI. This
- * means that when we receive the interrupt we can call
- * perf_event_do_pending() that handles all of the work with
- * interrupts disabled.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
 /* ARM performance counters start from 1 (in the cp15 accesses) so use the
  * same indexes here for consistency. */
 #define PERF_EVENT_INDEX_OFFSET 1

arch/arm/kernel/perf_event.c

Lines changed: 4 additions & 4 deletions
@@ -1092,7 +1092,7 @@ armv6pmu_handle_irq(int irq_num,
 	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
-	perf_event_do_pending();
+	irq_work_run();
 
 	return IRQ_HANDLED;
 }
@@ -2068,7 +2068,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
-	perf_event_do_pending();
+	irq_work_run();
 
 	return IRQ_HANDLED;
 }
@@ -2436,7 +2436,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		armpmu->disable(hwc, idx);
 	}
 
-	perf_event_do_pending();
+	irq_work_run();
 
 	/*
 	 * Re-enable the PMU.
@@ -2763,7 +2763,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		armpmu->disable(hwc, idx);
 	}
 
-	perf_event_do_pending();
+	irq_work_run();
 
 	/*
 	 * Re-enable the PMU.

arch/frv/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -7,6 +7,7 @@ config FRV
 	default y
 	select HAVE_IDE
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 
 config ZONE_DMA

arch/frv/lib/Makefile

Lines changed: 1 addition & 1 deletion
@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o

arch/frv/lib/perf_event.c

Lines changed: 0 additions & 19 deletions
This file was deleted.

arch/parisc/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -16,6 +16,7 @@ config PARISC
 	select RTC_DRV_GENERIC
 	select INIT_ALL_POSSIBLE
 	select BUG
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
 	help

arch/parisc/include/asm/perf_event.h

Lines changed: 1 addition & 2 deletions
@@ -1,7 +1,6 @@
 #ifndef __ASM_PARISC_PERF_EVENT_H
 #define __ASM_PARISC_PERF_EVENT_H
 
-/* parisc only supports software events through this interface. */
-static inline void set_perf_event_pending(void) { }
+/* Empty, just to avoid compiling error */
 
 #endif /* __ASM_PARISC_PERF_EVENT_H */

arch/powerpc/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -138,6 +138,7 @@ config PPC
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS if PPC64
 	select GENERIC_ATOMIC64 if PPC32
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64

arch/powerpc/include/asm/paca.h

Lines changed: 1 addition & 1 deletion
@@ -129,7 +129,7 @@ struct paca_struct {
 	u8 soft_enabled;		/* irq soft-enable flag */
 	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
-	u8 perf_event_pending;		/* PM interrupt while soft-disabled */
+	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */

arch/powerpc/kernel/time.c

Lines changed: 21 additions & 21 deletions
@@ -53,7 +53,7 @@
 #include <linux/posix-timers.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <asm/trace.h>
 
 #include <asm/io.h>
@@ -493,60 +493,60 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 
 /*
  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
  */
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
+static inline unsigned long test_irq_work_pending(void)
 {
 	unsigned long x;
 
 	asm volatile("lbz %0,%1(13)"
 		: "=r" (x)
-		: "i" (offsetof(struct paca_struct, perf_event_pending)));
+		: "i" (offsetof(struct paca_struct, irq_work_pending)));
 	return x;
 }
 
-static inline void set_perf_event_pending_flag(void)
+static inline void set_irq_work_pending_flag(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (1),
-		"i" (offsetof(struct paca_struct, perf_event_pending)));
+		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }
 
-static inline void clear_perf_event_pending(void)
+static inline void clear_irq_work_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (0),
-		"i" (offsetof(struct paca_struct, perf_event_pending)));
+		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }
 
 #else /* 32-bit */
 
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_perf_event_pending_flag()	__get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()	__get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()	__get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()	__get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
 
 #endif /* 32 vs 64 bit */
 
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
 	preempt_disable();
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 	set_dec(1);
 	preempt_enable();
 }
 
-#else /* CONFIG_PERF_EVENTS */
+#else /* CONFIG_IRQ_WORK */
 
-#define test_perf_event_pending()	0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()	0
+#define clear_irq_work_pending()
 
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
@@ -587,9 +587,9 @@ void timer_interrupt(struct pt_regs * regs)
 
 	calculate_steal_time();
 
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}
 
 #ifdef CONFIG_PPC_ISERIES

arch/s390/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -95,6 +95,7 @@ config S390
 	select HAVE_KVM if 64BIT
 	select HAVE_ARCH_TRACEHOOK
 	select INIT_ALL_POSSIBLE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2

arch/s390/include/asm/perf_event.h

Lines changed: 1 addition & 2 deletions
@@ -4,7 +4,6 @@
  * Copyright 2009 Martin Schwidefsky, IBM Corporation.
  */
 
-static inline void set_perf_event_pending(void) {}
-static inline void clear_perf_event_pending(void) {}
+/* Empty, just to avoid compiling error */
 
 #define PERF_EVENT_INDEX_OFFSET 0

arch/sh/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -16,6 +16,7 @@ config SUPERH
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_KERNEL_GZIP

arch/sh/include/asm/perf_event.h

Lines changed: 0 additions & 7 deletions
@@ -26,11 +26,4 @@ extern int register_sh_pmu(struct sh_pmu *);
 extern int reserve_pmc_hardware(void);
 extern void release_pmc_hardware(void);
 
-static inline void set_perf_event_pending(void)
-{
-	/* Nothing to see here, move along. */
-}
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #endif /* __ASM_SH_PERF_EVENT_H */

arch/sparc/Kconfig

Lines changed: 2 additions & 0 deletions
@@ -26,6 +26,7 @@ config SPARC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select RTC_CLASS
 	select RTC_DRV_M48T59
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_DMA_ATTRS
@@ -54,6 +55,7 @@ config SPARC64
 	select RTC_DRV_BQ4802
 	select RTC_DRV_SUN4V
 	select RTC_DRV_STARFIRE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 
arch/sparc/include/asm/perf_event.h

Lines changed: 0 additions & 4 deletions
@@ -1,10 +1,6 @@
 #ifndef __ASM_SPARC_PERF_EVENT_H
 #define __ASM_SPARC_PERF_EVENT_H
 
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #ifdef CONFIG_PERF_EVENTS
 #include <asm/ptrace.h>
 