
Commit 1e74016

Mike Travis <travis@sgi.com> authored and Ingo Molnar <mingo@kernel.org> committed
x86/platform/UV: Clean up the NMI code to match current coding style
Update UV NMI to current coding style.

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russ Anderson <rja@hpe.com>
Link: http://lkml.kernel.org/r/20170125163518.419094259@asylum.americas.sgi.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 9ec808a commit 1e74016

File tree: 1 file changed (+37, −37 lines)


arch/x86/platform/uv/uv_nmi.c

Lines changed: 37 additions & 37 deletions
@@ -45,8 +45,8 @@
  *
  * Handle system-wide NMI events generated by the global 'power nmi' command.
  *
- * Basic operation is to field the NMI interrupt on each cpu and wait
- * until all cpus have arrived into the nmi handler. If some cpus do not
+ * Basic operation is to field the NMI interrupt on each CPU and wait
+ * until all CPU's have arrived into the nmi handler. If some CPU's do not
  * make it into the handler, try and force them in with the IPI(NMI) signal.
  *
  * We also have to lessen UV Hub MMR accesses as much as possible as this
@@ -56,7 +56,7 @@
  * To do this we register our primary NMI notifier on the NMI_UNKNOWN
  * chain. This reduces the number of false NMI calls when the perf
  * tools are running which generate an enormous number of NMIs per
- * second (~4M/s for 1024 cpu threads). Our secondary NMI handler is
+ * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is
  * very short as it only checks that if it has been "pinged" with the
  * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
  *
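For context, the primary/secondary handler split this comment block describes is built on the standard x86 NMI notifier API. Below is a minimal illustrative sketch, not the uv_nmi.c implementation: register_nmi_handler(), NMI_UNKNOWN, NMI_LOCAL, NMI_FLAG_FIRST, NMI_HANDLED and NMI_DONE are the kernel's own, while all example_* names and the handler bodies are hypothetical placeholders.

#include <asm/nmi.h>

/* Illustrative sketch only; names with example_ are hypothetical. */

/*
 * Primary handler, registered on the NMI_UNKNOWN chain so it runs only
 * after perf and the other NMI_LOCAL handlers have declined the NMI:
 */
static int example_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
        /* ... test hub/PCH NMI status, rendezvous all CPUs, run action ... */
        return NMI_HANDLED;
}

/*
 * Secondary "ping" handler: kept very short, it only answers the
 * IPI(NMI) used to pull straggler CPUs into the main handler:
 */
static int example_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
        /* ... return NMI_HANDLED only if this CPU was pinged ... */
        return NMI_DONE;
}

static void example_register_handlers(void)
{
        register_nmi_handler(NMI_UNKNOWN, example_handle_nmi,
                             NMI_FLAG_FIRST, "uv");
        register_nmi_handler(NMI_LOCAL, example_handle_nmi_ping,
                             NMI_FLAG_FIRST, "uvping");
}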
@@ -113,7 +113,7 @@ static int param_get_local64(char *buffer, const struct kernel_param *kp)
 
 static int param_set_local64(const char *val, const struct kernel_param *kp)
 {
-	/* clear on any write */
+	/* Clear on any write */
 	local64_set((local64_t *)kp->arg, 0);
 	return 0;
 }
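param_set_local64() above is the write half of a get/set pair backing a local64_t module parameter. As a rough sketch of how such a pair is typically wired up (struct kernel_param_ops and module_param_cb() are the kernel's own; the counter name and ops-struct name here are hypothetical):

#include <linux/moduleparam.h>
#include <asm/local64.h>

/* Hypothetical counter, exposed only for illustration */
static local64_t example_ping_count;

static const struct kernel_param_ops example_param_ops_local64 = {
        .get = param_get_local64,       /* Format the counter for reads */
        .set = param_set_local64,       /* Clear the counter on any write */
};

/* Writable parameter: writing any value resets the counter to zero */
module_param_cb(example_ping_count, &example_param_ops_local64,
                &example_ping_count, 0644);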
@@ -322,7 +322,7 @@ static struct init_nmi {
 	.data = 0x0,		/* ACPI Mode */
 	},
 
-/* clear status */
+/* Clear status: */
 	{	/* GPI_INT_STS_GPP_D_0 */
 	.offset = 0x104,
 	.mask = 0x0,
@@ -344,29 +344,29 @@ static struct init_nmi {
 	.data = 0x1,		/* Clear Status */
 	},
 
-/* disable interrupts */
+/* Disable interrupts: */
 	{	/* GPI_INT_EN_GPP_D_0 */
 	.offset = 0x114,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_GPE_EN_GPP_D_0 */
 	.offset = 0x134,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_SMI_EN_GPP_D_0 */
 	.offset = 0x154,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_NMI_EN_GPP_D_0 */
 	.offset = 0x174,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 
-/* setup GPP_D_0 Pad Config */
+/* Setup GPP_D_0 Pad Config: */
 	{	/* PAD_CFG_DW0_GPP_D_0 */
 	.offset = 0x4c0,
 	.mask = 0xffffffff,
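Each entry in this init_nmi[] table pairs a PCH GPP_D_0 register offset with a mask and data value, so the setup path presumably walks the table doing a read-modify-write of each register, with mask == 0 entries handled as write-1-to-clear status writes. A hypothetical sketch of such a loop (the helper name and the pch_base mapping are assumptions, not taken from uv_nmi.c):

/* Hypothetical: apply a mask/data table to memory-mapped PCH registers */
static void example_apply_init_nmi(struct init_nmi *tab, int entries,
                                   void __iomem *pch_base)
{
        int i;

        for (i = 0; i < entries; i++) {
                void __iomem *addr = pch_base + tab[i].offset;
                u32 val = readl(addr);

                if (tab[i].mask)                /* Replace the masked bits */
                        writel((val & ~tab[i].mask) | tab[i].data, addr);
                else if (val & tab[i].data)     /* Write-1-to-clear status */
                        writel(tab[i].data, addr);
        }
}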
@@ -444,7 +444,7 @@ static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
 		return 0;
 
 	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
-	(void)*pstat;			/* flush write */
+	(void)*pstat;			/* Flush write */
 
 	return 1;
 }
@@ -461,8 +461,8 @@ static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
 }
 
 /*
- * If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and
- * return true.  If first cpu in on the system, set global "in_nmi" flag.
+ * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
+ * return true.  If first CPU in on the system, set global "in_nmi" flag.
  */
 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
 {
@@ -496,7 +496,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
 			nmi_detected = uv_test_nmi(hub_nmi);
 
-			/* check flag for UV external NMI */
+			/* Check flag for UV external NMI */
 			if (nmi_detected > 0) {
 				uv_set_in_nmi(cpu, hub_nmi);
 				nmi = 1;
@@ -516,7 +516,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 slave_wait:		cpu_relax();
 			udelay(uv_nmi_slave_delay);
 
-			/* re-check hub in_nmi flag */
+			/* Re-check hub in_nmi flag */
 			nmi = atomic_read(&hub_nmi->in_nmi);
 			if (nmi)
 				break;
@@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu)
 	}
 }
 
-/* Ping non-responding cpus attemping to force them into the NMI handler */
+/* Ping non-responding CPU's attemping to force them into the NMI handler */
 static void uv_nmi_nr_cpus_ping(void)
 {
 	int cpu;
@@ -571,7 +571,7 @@ static void uv_nmi_nr_cpus_ping(void)
 	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
 
-/* Clean up flags for cpus that ignored both NMI and ping */
+/* Clean up flags for CPU's that ignored both NMI and ping */
 static void uv_nmi_cleanup_mask(void)
 {
 	int cpu;
@@ -583,7 +583,7 @@ static void uv_nmi_cleanup_mask(void)
 	}
 }
 
-/* Loop waiting as cpus enter NMI handler */
+/* Loop waiting as CPU's enter NMI handler */
 static int uv_nmi_wait_cpus(int first)
 {
 	int i, j, k, n = num_online_cpus();
@@ -597,7 +597,7 @@ static int uv_nmi_wait_cpus(int first)
 		k = n - cpumask_weight(uv_nmi_cpu_mask);
 	}
 
-	/* PCH NMI causes only one cpu to respond */
+	/* PCH NMI causes only one CPU to respond */
 	if (first && uv_pch_intr_now_enabled) {
 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
 		return n - k - 1;
@@ -618,13 +618,13 @@ static int uv_nmi_wait_cpus(int first)
 			k = n;
 			break;
 		}
-		if (last_k != k) {	/* abort if no new cpus coming in */
+		if (last_k != k) {	/* abort if no new CPU's coming in */
 			last_k = k;
 			waiting = 0;
 		} else if (++waiting > uv_nmi_wait_count)
 			break;
 
-		/* extend delay if waiting only for cpu 0 */
+		/* Extend delay if waiting only for CPU 0: */
 		if (waiting && (n - k) == 1 &&
 		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
 			loop_delay *= 100;
@@ -635,29 +635,29 @@ static int uv_nmi_wait_cpus(int first)
 	return n - k;
 }
 
-/* Wait until all slave cpus have entered UV NMI handler */
+/* Wait until all slave CPU's have entered UV NMI handler */
 static void uv_nmi_wait(int master)
 {
-	/* indicate this cpu is in */
+	/* Indicate this CPU is in: */
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
 
-	/* if not the first cpu in (the master), then we are a slave cpu */
+	/* If not the first CPU in (the master), then we are a slave CPU */
 	if (!master)
 		return;
 
 	do {
-		/* wait for all other cpus to gather here */
+		/* Wait for all other CPU's to gather here */
 		if (!uv_nmi_wait_cpus(1))
 			break;
 
-		/* if not all made it in, send IPI NMI to them */
+		/* If not all made it in, send IPI NMI to them */
 		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
 			 cpumask_weight(uv_nmi_cpu_mask),
 			 cpumask_pr_args(uv_nmi_cpu_mask));
 
 		uv_nmi_nr_cpus_ping();
 
-		/* if all cpus are in, then done */
+		/* If all CPU's are in, then done */
 		if (!uv_nmi_wait_cpus(0))
 			break;
 
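The master loop above alternates between waiting for CPUs to arrive and pinging stragglers with IPI(NMI). Stripped of the UV specifics, the waiting half reduces to polling a cpumask that arriving CPUs clear themselves from; a condensed, hypothetical sketch of that pattern (none of these names come from uv_nmi.c):

#include <linux/cpumask.h>
#include <linux/delay.h>

/*
 * Hypothetical: poll until every CPU has cleared itself from 'pending',
 * or the loop budget runs out; returns the number of CPUs still missing.
 */
static int example_wait_cpus(struct cpumask *pending, int max_loops,
                             unsigned int delay_us)
{
        int i;

        for (i = 0; i < max_loops; i++) {
                if (cpumask_empty(pending))
                        return 0;               /* Everyone arrived */
                udelay(delay_us);
        }

        return cpumask_weight(pending);         /* Count of non-responders */
}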
@@ -709,7 +709,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
 }
 
-/* Trigger a slave cpu to dump it's state */
+/* Trigger a slave CPU to dump it's state */
 static void uv_nmi_trigger_dump(int cpu)
 {
 	int retry = uv_nmi_trigger_delay;
@@ -730,7 +730,7 @@ static void uv_nmi_trigger_dump(int cpu)
 		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }
 
-/* Wait until all cpus ready to exit */
+/* Wait until all CPU's ready to exit */
 static void uv_nmi_sync_exit(int master)
 {
 	atomic_dec(&uv_nmi_cpus_in_nmi);
@@ -760,7 +760,7 @@ static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
 	uv_nmi_sync_exit(master);
 }
 
-/* Walk through cpu list and dump state of each */
+/* Walk through CPU list and dump state of each */
 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 {
 	if (master) {
@@ -872,23 +872,23 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 		if (reason < 0)
 			return;
 
-		/* call KGDB NMI handler as MASTER */
+		/* Call KGDB NMI handler as MASTER */
 		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
 				&uv_nmi_slave_continue);
 		if (ret) {
 			pr_alert("KGDB returned error, is kgdboc set?\n");
 			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
 		}
 	} else {
-		/* wait for KGDB signal that it's ready for slaves to enter */
+		/* Wait for KGDB signal that it's ready for slaves to enter */
 		int sig;
 
 		do {
 			cpu_relax();
 			sig = atomic_read(&uv_nmi_slave_continue);
 		} while (!sig);
 
-		/* call KGDB as slave */
+		/* Call KGDB as slave */
 		if (sig == SLAVE_CONTINUE)
 			kgdb_nmicallback(cpu, regs);
 	}
@@ -932,7 +932,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 		strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
 	}
 
-	/* Pause as all cpus enter the NMI handler */
+	/* Pause as all CPU's enter the NMI handler */
 	uv_nmi_wait(master);
 
 	/* Process actions other than "kdump": */
@@ -972,7 +972,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 }
 
 /*
- * NMI handler for pulling in CPUs when perf events are grabbing our NMI
+ * NMI handler for pulling in CPU's when perf events are grabbing our NMI
  */
 static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
@@ -1005,7 +1005,7 @@ void uv_nmi_init(void)
 	unsigned int value;
 
 	/*
-	 * Unmask NMI on all cpus
+	 * Unmask NMI on all CPU's
 	 */
 	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
 	value &= ~APIC_LVT_MASKED;
