  *
  * Handle system-wide NMI events generated by the global 'power nmi' command.
  *
- * Basic operation is to field the NMI interrupt on each cpu and wait
- * until all cpus have arrived into the nmi handler. If some cpus do not
+ * Basic operation is to field the NMI interrupt on each CPU and wait
+ * until all CPU's have arrived into the nmi handler. If some CPU's do not
  * make it into the handler, try and force them in with the IPI(NMI) signal.
  *
  * We also have to lessen UV Hub MMR accesses as much as possible as this
  * To do this we register our primary NMI notifier on the NMI_UNKNOWN
  * chain. This reduces the number of false NMI calls when the perf
  * tools are running which generate an enormous number of NMIs per
- * second (~4M/s for 1024 cpu threads). Our secondary NMI handler is
+ * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is
  * very short as it only checks that if it has been "pinged" with the
  * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
  *
@@ -113,7 +113,7 @@ static int param_get_local64(char *buffer, const struct kernel_param *kp)
 
 static int param_set_local64(const char *val, const struct kernel_param *kp)
 {
-	/* clear on any write */
+	/* Clear on any write */
 	local64_set((local64_t *)kp->arg, 0);
 	return 0;
 }
@@ -322,7 +322,7 @@ static struct init_nmi {
 		.data = 0x0,	/* ACPI Mode */
 	},
 
-	/* clear status */
+	/* Clear status: */
 	{	/* GPI_INT_STS_GPP_D_0 */
 		.offset = 0x104,
 		.mask = 0x0,
@@ -344,29 +344,29 @@ static struct init_nmi {
 		.data = 0x1,	/* Clear Status */
 	},
 
-	/* disable interrupts */
+	/* Disable interrupts: */
 	{	/* GPI_INT_EN_GPP_D_0 */
 		.offset = 0x114,
 		.mask = 0x1,
-		.data = 0x0,	/* disable interrupt generation */
+		.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_GPE_EN_GPP_D_0 */
 		.offset = 0x134,
 		.mask = 0x1,
-		.data = 0x0,	/* disable interrupt generation */
+		.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_SMI_EN_GPP_D_0 */
 		.offset = 0x154,
 		.mask = 0x1,
-		.data = 0x0,	/* disable interrupt generation */
+		.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_NMI_EN_GPP_D_0 */
 		.offset = 0x174,
 		.mask = 0x1,
-		.data = 0x0,	/* disable interrupt generation */
+		.data = 0x0,	/* Disable interrupt generation */
 	},
 
-	/* setup GPP_D_0 Pad Config */
+	/* Setup GPP_D_0 Pad Config: */
 	{	/* PAD_CFG_DW0_GPP_D_0 */
 		.offset = 0x4c0,
 		.mask = 0xffffffff,
@@ -444,7 +444,7 @@ static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
 		return 0;
 
 	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
-	(void)*pstat;			/* flush write */
+	(void)*pstat;			/* Flush write */
 
 	return 1;
 }
@@ -461,8 +461,8 @@ static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
 }
 
 /*
- * If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and
- * return true. If first cpu in on the system, set global "in_nmi" flag.
+ * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
+ * return true. If first CPU in on the system, set global "in_nmi" flag.
  */
 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
 {
@@ -496,7 +496,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 	if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
 		nmi_detected = uv_test_nmi(hub_nmi);
 
-		/* check flag for UV external NMI */
+		/* Check flag for UV external NMI */
 		if (nmi_detected > 0) {
 			uv_set_in_nmi(cpu, hub_nmi);
 			nmi = 1;
@@ -516,7 +516,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 slave_wait:		cpu_relax();
 			udelay(uv_nmi_slave_delay);
 
-			/* re-check hub in_nmi flag */
+			/* Re-check hub in_nmi flag */
 			nmi = atomic_read(&hub_nmi->in_nmi);
 			if (nmi)
 				break;
@@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu)
 	}
 }
 
-/* Ping non-responding cpus attemping to force them into the NMI handler */
+/* Ping non-responding CPU's attemping to force them into the NMI handler */
 static void uv_nmi_nr_cpus_ping(void)
 {
 	int cpu;
@@ -571,7 +571,7 @@ static void uv_nmi_nr_cpus_ping(void)
 	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
 
-/* Clean up flags for cpus that ignored both NMI and ping */
+/* Clean up flags for CPU's that ignored both NMI and ping */
 static void uv_nmi_cleanup_mask(void)
 {
 	int cpu;
@@ -583,7 +583,7 @@ static void uv_nmi_cleanup_mask(void)
 	}
 }
 
-/* Loop waiting as cpus enter NMI handler */
+/* Loop waiting as CPU's enter NMI handler */
 static int uv_nmi_wait_cpus(int first)
 {
 	int i, j, k, n = num_online_cpus();
@@ -597,7 +597,7 @@ static int uv_nmi_wait_cpus(int first)
 		k = n - cpumask_weight(uv_nmi_cpu_mask);
 	}
 
-	/* PCH NMI causes only one cpu to respond */
+	/* PCH NMI causes only one CPU to respond */
 	if (first && uv_pch_intr_now_enabled) {
 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
 		return n - k - 1;
@@ -618,13 +618,13 @@ static int uv_nmi_wait_cpus(int first)
 			k = n;
 			break;
 		}
-		if (last_k != k) {	/* abort if no new cpus coming in */
+		if (last_k != k) {	/* abort if no new CPU's coming in */
 			last_k = k;
 			waiting = 0;
 		} else if (++waiting > uv_nmi_wait_count)
 			break;
 
-		/* extend delay if waiting only for cpu 0 */
+		/* Extend delay if waiting only for CPU 0: */
 		if (waiting && (n - k) == 1 &&
 		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
 			loop_delay *= 100;
@@ -635,29 +635,29 @@ static int uv_nmi_wait_cpus(int first)
 	return n - k;
 }
 
-/* Wait until all slave cpus have entered UV NMI handler */
+/* Wait until all slave CPU's have entered UV NMI handler */
 static void uv_nmi_wait(int master)
 {
-	/* indicate this cpu is in */
+	/* Indicate this CPU is in: */
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
 
-	/* if not the first cpu in (the master), then we are a slave cpu */
+	/* If not the first CPU in (the master), then we are a slave CPU */
 	if (!master)
 		return;
 
 	do {
-		/* wait for all other cpus to gather here */
+		/* Wait for all other CPU's to gather here */
 		if (!uv_nmi_wait_cpus(1))
 			break;
 
-		/* if not all made it in, send IPI NMI to them */
+		/* If not all made it in, send IPI NMI to them */
 		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
 			 cpumask_weight(uv_nmi_cpu_mask),
 			 cpumask_pr_args(uv_nmi_cpu_mask));
 
 		uv_nmi_nr_cpus_ping();
 
-		/* if all cpus are in, then done */
+		/* If all CPU's are in, then done */
 		if (!uv_nmi_wait_cpus(0))
 			break;
 
@@ -709,7 +709,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
 }
 
-/* Trigger a slave cpu to dump it's state */
+/* Trigger a slave CPU to dump it's state */
 static void uv_nmi_trigger_dump(int cpu)
 {
 	int retry = uv_nmi_trigger_delay;
@@ -730,7 +730,7 @@ static void uv_nmi_trigger_dump(int cpu)
 	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }
 
-/* Wait until all cpus ready to exit */
+/* Wait until all CPU's ready to exit */
 static void uv_nmi_sync_exit(int master)
 {
 	atomic_dec(&uv_nmi_cpus_in_nmi);
@@ -760,7 +760,7 @@ static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
 	uv_nmi_sync_exit(master);
 }
 
-/* Walk through cpu list and dump state of each */
+/* Walk through CPU list and dump state of each */
 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 {
 	if (master) {
@@ -872,23 +872,23 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 		if (reason < 0)
 			return;
 
-		/* call KGDB NMI handler as MASTER */
+		/* Call KGDB NMI handler as MASTER */
 		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
 				&uv_nmi_slave_continue);
 		if (ret) {
 			pr_alert("KGDB returned error, is kgdboc set?\n");
 			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
 		}
 	} else {
-		/* wait for KGDB signal that it's ready for slaves to enter */
+		/* Wait for KGDB signal that it's ready for slaves to enter */
 		int sig;
 
 		do {
 			cpu_relax();
 			sig = atomic_read(&uv_nmi_slave_continue);
 		} while (!sig);
 
-		/* call KGDB as slave */
+		/* Call KGDB as slave */
 		if (sig == SLAVE_CONTINUE)
 			kgdb_nmicallback(cpu, regs);
 	}
@@ -932,7 +932,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 		strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
 	}
 
-	/* Pause as all cpus enter the NMI handler */
+	/* Pause as all CPU's enter the NMI handler */
 	uv_nmi_wait(master);
 
 	/* Process actions other than "kdump": */
@@ -972,7 +972,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 }
 
 /*
- * NMI handler for pulling in CPUs when perf events are grabbing our NMI
+ * NMI handler for pulling in CPU's when perf events are grabbing our NMI
  */
 static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
@@ -1005,7 +1005,7 @@ void uv_nmi_init(void)
 	unsigned int value;
 
 	/*
-	 * Unmask NMI on all cpus
+	 * Unmask NMI on all CPU's
 	 */
 	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
 	value &= ~APIC_LVT_MASKED;
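The header comment and the uv_nmi_wait()/uv_nmi_nr_cpus_ping() hunks above describe a rendezvous: each CPU that fields the NMI marks itself in, the master waits for the rest, and any stragglers are forced in with an IPI(NMI). A minimal user-space sketch of that wait-then-ping pattern follows; it is illustrative only, and the thread and variable names (cpu_thread, cpus_in, ping_sent, NCPUS) are hypothetical rather than taken from uv_nmi.c.

/*
 * User-space sketch of the "wait, then ping stragglers" rendezvous.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static atomic_int cpus_in;	/* count of "CPUs" that reached the handler */
static atomic_int ping_sent;	/* stands in for the IPI(NMI) ping */

static void *cpu_thread(void *arg)
{
	long cpu = (long)arg;

	/* Simulate one straggler that only reacts once it is pinged. */
	if (cpu == NCPUS - 1)
		while (!atomic_load(&ping_sent))
			usleep(1000);

	atomic_fetch_add(&cpus_in, 1);	/* "arrive" in the handler */
	return NULL;
}

int main(void)
{
	pthread_t tid[NCPUS];
	int tries = 0;

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&tid[i], NULL, cpu_thread, (void *)i);

	/* First pass: wait a bounded time for every CPU to show up. */
	while (atomic_load(&cpus_in) < NCPUS && tries++ < 50)
		usleep(1000);

	/* Not all made it in: "ping" the stragglers, then wait again. */
	if (atomic_load(&cpus_in) < NCPUS) {
		printf("pinging %d straggler(s)\n", NCPUS - atomic_load(&cpus_in));
		atomic_store(&ping_sent, 1);
	}
	while (atomic_load(&cpus_in) < NCPUS)
		usleep(1000);

	for (int i = 0; i < NCPUS; i++)
		pthread_join(tid[i], NULL);
	printf("all %d CPUs arrived\n", NCPUS);
	return 0;
}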