Skip to content

Commit 9c48f1c

Browse files
dzickusrh authored and Ingo Molnar committed
x86, nmi: Wire up NMI handlers to new routines
Just convert all the files that have an nmi handler to the new routines. Most of it is straightforward conversion. A couple of places needed some tweaking like kgdb which separates the debug notifier from the nmi handler and mce removes a call to notify_die. [Thanks to Ying for finding out the history behind that mce call https://lkml.org/lkml/2010/5/27/114 And Boris responding that he would like to remove that call because of it https://lkml.org/lkml/2011/9/21/163] The things that get converted are the registration/unregistration routines and the nmi handler itself has its args changed along with code removal to check which list it is on (most are on one NMI list except for kgdb which has both an NMI routine and an NMI Unknown routine). Signed-off-by: Don Zickus <dzickus@redhat.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Corey Minyard <minyard@acm.org> Cc: Jason Wessel <jason.wessel@windriver.com> Cc: Andi Kleen <ak@linux.intel.com> Cc: Robert Richter <robert.richter@amd.com> Cc: Huang Ying <ying.huang@intel.com> Cc: Corey Minyard <minyard@acm.org> Cc: Jack Steiner <steiner@sgi.com> Link: http://lkml.kernel.org/r/1317409584-23662-4-git-send-email-dzickus@redhat.com Signed-off-by: Ingo Molnar <mingo@elte.hu>
1 parent c9126b2 commit 9c48f1c

File tree

16 files changed

+124
-281
lines changed

16 files changed

+124
-281
lines changed

arch/x86/include/asm/nmi.h

Lines changed: 0 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -22,26 +22,6 @@ void arch_trigger_all_cpu_backtrace(void);
2222
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
2323
#endif
2424

25-
/*
26-
* Define some priorities for the nmi notifier call chain.
27-
*
28-
* Create a local nmi bit that has a higher priority than
29-
* external nmis, because the local ones are more frequent.
30-
*
31-
* Also setup some default high/normal/low settings for
32-
* subsystems to registers with. Using 4 bits to separate
33-
* the priorities. This can go a lot higher if needed be.
34-
*/
35-
36-
#define NMI_LOCAL_SHIFT 16 /* randomly picked */
37-
#define NMI_LOCAL_BIT (1ULL << NMI_LOCAL_SHIFT)
38-
#define NMI_HIGH_PRIOR (1ULL << 8)
39-
#define NMI_NORMAL_PRIOR (1ULL << 4)
40-
#define NMI_LOW_PRIOR (1ULL << 0)
41-
#define NMI_LOCAL_HIGH_PRIOR (NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
42-
#define NMI_LOCAL_NORMAL_PRIOR (NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
43-
#define NMI_LOCAL_LOW_PRIOR (NMI_LOCAL_BIT | NMI_LOW_PRIOR)
44-
4525
#define NMI_FLAG_FIRST 1
4626

4727
enum {

arch/x86/include/asm/reboot.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ void machine_real_restart(unsigned int type);
2323
#define MRR_BIOS 0
2424
#define MRR_APM 1
2525

26-
typedef void (*nmi_shootdown_cb)(int, struct die_args*);
26+
typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
2727
void nmi_shootdown_cpus(nmi_shootdown_cb callback);
2828

2929
#endif /* _ASM_X86_REBOOT_H */

arch/x86/kernel/apic/hw_nmi.c

Lines changed: 5 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -60,22 +60,10 @@ void arch_trigger_all_cpu_backtrace(void)
6060
}
6161

6262
static int __kprobes
63-
arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
64-
unsigned long cmd, void *__args)
63+
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
6564
{
66-
struct die_args *args = __args;
67-
struct pt_regs *regs;
6865
int cpu;
6966

70-
switch (cmd) {
71-
case DIE_NMI:
72-
break;
73-
74-
default:
75-
return NOTIFY_DONE;
76-
}
77-
78-
regs = args->regs;
7967
cpu = smp_processor_id();
8068

8169
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
@@ -86,21 +74,16 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
8674
show_regs(regs);
8775
arch_spin_unlock(&lock);
8876
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
89-
return NOTIFY_STOP;
77+
return NMI_HANDLED;
9078
}
9179

92-
return NOTIFY_DONE;
80+
return NMI_DONE;
9381
}
9482

95-
static __read_mostly struct notifier_block backtrace_notifier = {
96-
.notifier_call = arch_trigger_all_cpu_backtrace_handler,
97-
.next = NULL,
98-
.priority = NMI_LOCAL_LOW_PRIOR,
99-
};
100-
10183
static int __init register_trigger_all_cpu_backtrace(void)
10284
{
103-
register_die_notifier(&backtrace_notifier);
85+
register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
86+
0, "arch_bt");
10487
return 0;
10588
}
10689
early_initcall(register_trigger_all_cpu_backtrace);

arch/x86/kernel/apic/x2apic_uv_x.c

Lines changed: 4 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -672,18 +672,11 @@ void __cpuinit uv_cpu_init(void)
672672
/*
673673
* When NMI is received, print a stack trace.
674674
*/
675-
int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
675+
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
676676
{
677677
unsigned long real_uv_nmi;
678678
int bid;
679679

680-
if (reason != DIE_NMIUNKNOWN)
681-
return NOTIFY_OK;
682-
683-
if (in_crash_kexec)
684-
/* do nothing if entering the crash kernel */
685-
return NOTIFY_OK;
686-
687680
/*
688681
* Each blade has an MMR that indicates when an NMI has been sent
689682
* to cpus on the blade. If an NMI is detected, atomically
@@ -704,7 +697,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
704697
}
705698

706699
if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
707-
return NOTIFY_DONE;
700+
return NMI_DONE;
708701

709702
__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
710703

@@ -717,17 +710,12 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
717710
dump_stack();
718711
spin_unlock(&uv_nmi_lock);
719712

720-
return NOTIFY_STOP;
713+
return NMI_HANDLED;
721714
}
722715

723-
static struct notifier_block uv_dump_stack_nmi_nb = {
724-
.notifier_call = uv_handle_nmi,
725-
.priority = NMI_LOCAL_LOW_PRIOR - 1,
726-
};
727-
728716
void uv_register_nmi_notifier(void)
729717
{
730-
if (register_die_notifier(&uv_dump_stack_nmi_nb))
718+
if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
731719
printk(KERN_WARNING "UV NMI handler failed to register\n");
732720
}
733721

arch/x86/kernel/cpu/mcheck/mce-inject.c

Lines changed: 7 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
7878

7979
static cpumask_var_t mce_inject_cpumask;
8080

81-
static int mce_raise_notify(struct notifier_block *self,
82-
unsigned long val, void *data)
81+
static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
8382
{
84-
struct die_args *args = (struct die_args *)data;
8583
int cpu = smp_processor_id();
8684
struct mce *m = &__get_cpu_var(injectm);
87-
if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
88-
return NOTIFY_DONE;
85+
if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
86+
return NMI_DONE;
8987
cpumask_clear_cpu(cpu, mce_inject_cpumask);
9088
if (m->inject_flags & MCJ_EXCEPTION)
91-
raise_exception(m, args->regs);
89+
raise_exception(m, regs);
9290
else if (m->status)
9391
raise_poll(m);
94-
return NOTIFY_STOP;
92+
return NMI_HANDLED;
9593
}
9694

97-
static struct notifier_block mce_raise_nb = {
98-
.notifier_call = mce_raise_notify,
99-
.priority = NMI_LOCAL_NORMAL_PRIOR,
100-
};
101-
10295
/* Inject mce on current CPU */
10396
static int raise_local(void)
10497
{
@@ -216,7 +209,8 @@ static int inject_init(void)
216209
return -ENOMEM;
217210
printk(KERN_INFO "Machine check injector initialized\n");
218211
mce_chrdev_ops.write = mce_write;
219-
register_die_notifier(&mce_raise_nb);
212+
register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
213+
"mce_notify");
220214
return 0;
221215
}
222216

arch/x86/kernel/cpu/mcheck/mce.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
908908

909909
percpu_inc(mce_exception_count);
910910

911-
if (notify_die(DIE_NMI, "machine check", regs, error_code,
912-
18, SIGKILL) == NOTIFY_STOP)
913-
goto out;
914911
if (!banks)
915912
goto out;
916913

arch/x86/kernel/cpu/perf_event.c

Lines changed: 4 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -1058,76 +1058,15 @@ void perf_events_lapic_init(void)
10581058
apic_write(APIC_LVTPC, APIC_DM_NMI);
10591059
}
10601060

1061-
struct pmu_nmi_state {
1062-
unsigned int marked;
1063-
int handled;
1064-
};
1065-
1066-
static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
1067-
10681061
static int __kprobes
1069-
perf_event_nmi_handler(struct notifier_block *self,
1070-
unsigned long cmd, void *__args)
1062+
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
10711063
{
1072-
struct die_args *args = __args;
1073-
unsigned int this_nmi;
1074-
int handled;
1075-
10761064
if (!atomic_read(&active_events))
1077-
return NOTIFY_DONE;
1078-
1079-
switch (cmd) {
1080-
case DIE_NMI:
1081-
break;
1082-
case DIE_NMIUNKNOWN:
1083-
this_nmi = percpu_read(irq_stat.__nmi_count);
1084-
if (this_nmi != __this_cpu_read(pmu_nmi.marked))
1085-
/* let the kernel handle the unknown nmi */
1086-
return NOTIFY_DONE;
1087-
/*
1088-
* This one is a PMU back-to-back nmi. Two events
1089-
* trigger 'simultaneously' raising two back-to-back
1090-
* NMIs. If the first NMI handles both, the latter
1091-
* will be empty and daze the CPU. So, we drop it to
1092-
* avoid false-positive 'unknown nmi' messages.
1093-
*/
1094-
return NOTIFY_STOP;
1095-
default:
1096-
return NOTIFY_DONE;
1097-
}
1098-
1099-
handled = x86_pmu.handle_irq(args->regs);
1100-
if (!handled)
1101-
return NOTIFY_DONE;
1065+
return NMI_DONE;
11021066

1103-
this_nmi = percpu_read(irq_stat.__nmi_count);
1104-
if ((handled > 1) ||
1105-
/* the next nmi could be a back-to-back nmi */
1106-
((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
1107-
(__this_cpu_read(pmu_nmi.handled) > 1))) {
1108-
/*
1109-
* We could have two subsequent back-to-back nmis: The
1110-
* first handles more than one counter, the 2nd
1111-
* handles only one counter and the 3rd handles no
1112-
* counter.
1113-
*
1114-
* This is the 2nd nmi because the previous was
1115-
* handling more than one counter. We will mark the
1116-
* next (3rd) and then drop it if unhandled.
1117-
*/
1118-
__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
1119-
__this_cpu_write(pmu_nmi.handled, handled);
1120-
}
1121-
1122-
return NOTIFY_STOP;
1067+
return x86_pmu.handle_irq(regs);
11231068
}
11241069

1125-
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1126-
.notifier_call = perf_event_nmi_handler,
1127-
.next = NULL,
1128-
.priority = NMI_LOCAL_LOW_PRIOR,
1129-
};
1130-
11311070
struct event_constraint emptyconstraint;
11321071
struct event_constraint unconstrained;
11331072

@@ -1232,7 +1171,7 @@ static int __init init_hw_perf_events(void)
12321171
((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
12331172

12341173
perf_events_lapic_init();
1235-
register_die_notifier(&perf_event_nmi_notifier);
1174+
register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
12361175

12371176
unconstrained = (struct event_constraint)
12381177
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,

arch/x86/kernel/crash.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -32,15 +32,12 @@ int in_crash_kexec;
3232

3333
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
3434

35-
static void kdump_nmi_callback(int cpu, struct die_args *args)
35+
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
3636
{
37-
struct pt_regs *regs;
3837
#ifdef CONFIG_X86_32
3938
struct pt_regs fixed_regs;
4039
#endif
4140

42-
regs = args->regs;
43-
4441
#ifdef CONFIG_X86_32
4542
if (!user_mode_vm(regs)) {
4643
crash_fixup_ss_esp(&fixed_regs, regs);

arch/x86/kernel/kgdb.c

Lines changed: 45 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -511,28 +511,37 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)
511511

512512
static int was_in_debug_nmi[NR_CPUS];
513513

514-
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
514+
static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
515515
{
516-
struct pt_regs *regs = args->regs;
517-
518516
switch (cmd) {
519-
case DIE_NMI:
517+
case NMI_LOCAL:
520518
if (atomic_read(&kgdb_active) != -1) {
521519
/* KGDB CPU roundup */
522520
kgdb_nmicallback(raw_smp_processor_id(), regs);
523521
was_in_debug_nmi[raw_smp_processor_id()] = 1;
524522
touch_nmi_watchdog();
525-
return NOTIFY_STOP;
523+
return NMI_HANDLED;
526524
}
527-
return NOTIFY_DONE;
525+
break;
528526

529-
case DIE_NMIUNKNOWN:
527+
case NMI_UNKNOWN:
530528
if (was_in_debug_nmi[raw_smp_processor_id()]) {
531529
was_in_debug_nmi[raw_smp_processor_id()] = 0;
532-
return NOTIFY_STOP;
530+
return NMI_HANDLED;
533531
}
534-
return NOTIFY_DONE;
532+
break;
533+
default:
534+
/* do nothing */
535+
break;
536+
}
537+
return NMI_DONE;
538+
}
539+
540+
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
541+
{
542+
struct pt_regs *regs = args->regs;
535543

544+
switch (cmd) {
536545
case DIE_DEBUG:
537546
if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
538547
if (user_mode(regs))
@@ -590,11 +599,6 @@ kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
590599

591600
static struct notifier_block kgdb_notifier = {
592601
.notifier_call = kgdb_notify,
593-
594-
/*
595-
* Lowest-prio notifier priority, we want to be notified last:
596-
*/
597-
.priority = NMI_LOCAL_LOW_PRIOR,
598602
};
599603

600604
/**
@@ -605,7 +609,31 @@ static struct notifier_block kgdb_notifier = {
605609
*/
606610
int kgdb_arch_init(void)
607611
{
608-
return register_die_notifier(&kgdb_notifier);
612+
int retval;
613+
614+
retval = register_die_notifier(&kgdb_notifier);
615+
if (retval)
616+
goto out;
617+
618+
retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
619+
0, "kgdb");
620+
if (retval)
621+
goto out1;
622+
623+
retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
624+
0, "kgdb");
625+
626+
if (retval)
627+
goto out2;
628+
629+
return retval;
630+
631+
out2:
632+
unregister_nmi_handler(NMI_LOCAL, "kgdb");
633+
out1:
634+
unregister_die_notifier(&kgdb_notifier);
635+
out:
636+
return retval;
609637
}
610638

611639
static void kgdb_hw_overflow_handler(struct perf_event *event,
@@ -673,6 +701,8 @@ void kgdb_arch_exit(void)
673701
breakinfo[i].pev = NULL;
674702
}
675703
}
704+
unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
705+
unregister_nmi_handler(NMI_LOCAL, "kgdb");
676706
unregister_die_notifier(&kgdb_notifier);
677707
}
678708

0 commit comments

Comments
 (0)