 
 static struct uv_hub_nmi_s **uv_hub_nmi_list;
 
-DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
-EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi);
+DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
 
 static unsigned long nmi_mmr;
 static unsigned long nmi_mmr_clear;
@@ -215,7 +215,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 	int nmi = 0;
 
 	local64_inc(&uv_nmi_count);
-	uv_cpu_nmi.queries++;
+	this_cpu_inc(uv_cpu_nmi.queries);
 
 	do {
 		nmi = atomic_read(&hub_nmi->in_nmi);
@@ -293,7 +293,7 @@ static void uv_nmi_nr_cpus_ping(void)
 	int cpu;
 
 	for_each_cpu(cpu, uv_nmi_cpu_mask)
-		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1);
+		uv_cpu_nmi_per(cpu).pinging = 1;
 
 	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
@@ -304,8 +304,8 @@ static void uv_nmi_cleanup_mask(void)
 	int cpu;
 
 	for_each_cpu(cpu, uv_nmi_cpu_mask) {
-		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0);
-		atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT);
+		uv_cpu_nmi_per(cpu).pinging = 0;
+		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
 	}
 }
@@ -328,7 +328,7 @@ static int uv_nmi_wait_cpus(int first)
 		int loop_delay = uv_nmi_loop_delay;
 
 		for_each_cpu(j, uv_nmi_cpu_mask) {
-			if (atomic_read(&uv_cpu_nmi_per(j).state)) {
+			if (uv_cpu_nmi_per(j).state) {
 				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
 				if (++k >= n)
 					break;
@@ -359,7 +359,7 @@ static int uv_nmi_wait_cpus(int first)
 static void uv_nmi_wait(int master)
 {
 	/* indicate this cpu is in */
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
 
 	/* if not the first cpu in (the master), then we are a slave cpu */
 	if (!master)
@@ -419,28 +419,28 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
 			"UV:%sNMI process trace for CPU %d\n", dots, cpu);
 		show_regs(regs);
 	}
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
 }
 
 /* Trigger a slave cpu to dump it's state */
 static void uv_nmi_trigger_dump(int cpu)
 {
 	int retry = uv_nmi_trigger_delay;
 
-	if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN)
+	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
 		return;
 
-	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP);
+	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
 	do {
 		cpu_relax();
 		udelay(10);
-		if (atomic_read(&uv_cpu_nmi_per(cpu).state)
+		if (uv_cpu_nmi_per(cpu).state
 				!= UV_NMI_STATE_DUMP)
 			return;
 	} while (--retry > 0);
 
 	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
-	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE);
+	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }
 
 /* Wait until all cpus ready to exit */
@@ -488,7 +488,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 	} else {
 		while (!atomic_read(&uv_nmi_slave_continue))
 			cpu_relax();
-		while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
+		while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
 			cpu_relax();
 		uv_nmi_dump_state_cpu(cpu, regs);
 	}
@@ -615,7 +615,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 	local_irq_save(flags);
 
 	/* If not a UV System NMI, ignore */
-	if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
+	if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
 		local_irq_restore(flags);
 		return NMI_DONE;
 	}
@@ -639,7 +639,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 		uv_call_kgdb_kdb(cpu, regs, master);
 
 	/* Clear per_cpu "in nmi" flag */
-	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT);
+	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
 
 	/* Clear MMR NMI flag on each hub */
 	uv_clear_nmi(cpu);
@@ -666,16 +666,16 @@ static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
 	int ret;
 
-	uv_cpu_nmi.queries++;
-	if (!atomic_read(&uv_cpu_nmi.pinging)) {
+	this_cpu_inc(uv_cpu_nmi.queries);
+	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
 		local64_inc(&uv_nmi_ping_misses);
 		return NMI_DONE;
 	}
 
-	uv_cpu_nmi.pings++;
+	this_cpu_inc(uv_cpu_nmi.pings);
 	local64_inc(&uv_nmi_ping_count);
 	ret = uv_handle_nmi(reason, regs);
-	atomic_set(&uv_cpu_nmi.pinging, 0);
+	this_cpu_write(uv_cpu_nmi.pinging, 0);
 	return ret;
 }
 
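The pattern throughout the patch is the same: per-cpu NMI state that was previously wrapped in atomic_t and touched with atomic_read()/atomic_set() becomes a plain per-cpu value, accessed with this_cpu_read()/this_cpu_write()/this_cpu_inc() on the owning CPU and indexed explicitly (uv_cpu_nmi_per(cpu)) when another CPU's copy is needed. A minimal, hypothetical kernel-module sketch of that accessor pattern (demo_count, demo_bump and the module scaffolding are illustrative, not part of this patch):

```c
/*
 * Sketch only: shows this_cpu_*() for the local CPU's copy and
 * per_cpu() for an explicit CPU index, mirroring the conversion above.
 * on_each_cpu() stands in for the NMI IPI used in the driver.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, demo_count);

/* Runs on every CPU; bumps that CPU's own copy of the counter. */
static void demo_bump(void *info)
{
	this_cpu_inc(demo_count);	/* no explicit cpu index needed locally */
}

static int __init demo_init(void)
{
	int cpu;

	on_each_cpu(demo_bump, NULL, 1);

	/* Reading another CPU's copy uses an explicit index instead. */
	for_each_online_cpu(cpu)
		pr_info("demo: cpu %d count %d\n", cpu, per_cpu(demo_count, cpu));
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```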