@@ -30,8 +30,8 @@ struct cpu_hw_events {
 	u64 events[MAX_HWEVENTS];
 	unsigned int flags[MAX_HWEVENTS];
 	unsigned long mmcr[3];
-	struct perf_event *limited_event[MAX_LIMITED_HWEVENTS];
-	u8 limited_hwidx[MAX_LIMITED_HWEVENTS];
+	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
+	u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
 	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
 	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
 	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
@@ -253,7 +253,7 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw,
 	unsigned long addf = ppmu->add_fields;
 	unsigned long tadd = ppmu->test_adder;
 
-	if (n_ev > ppmu->n_event)
+	if (n_ev > ppmu->n_counter)
 		return -1;
 
 	/* First see if the events will go on as-is */
@@ -426,15 +426,15 @@ static int is_limited_pmc(int pmcnum)
 		&& (pmcnum == 5 || pmcnum == 6);
 }
 
-static void freeze_limited_events(struct cpu_hw_events *cpuhw,
+static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 				  unsigned long pmc5, unsigned long pmc6)
 {
 	struct perf_event *event;
 	u64 val, prev, delta;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		event = cpuhw->limited_event[i];
+		event = cpuhw->limited_counter[i];
 		if (!event->hw.idx)
 			continue;
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
@@ -445,15 +445,15 @@ static void freeze_limited_events(struct cpu_hw_events *cpuhw,
 	}
 }
 
-static void thaw_limited_events(struct cpu_hw_events *cpuhw,
+static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 				unsigned long pmc5, unsigned long pmc6)
 {
 	struct perf_event *event;
 	u64 val;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		event = cpuhw->limited_event[i];
+		event = cpuhw->limited_counter[i];
 		event->hw.idx = cpuhw->limited_hwidx[i];
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
 		atomic64_set(&event->hw.prev_count, val);
@@ -495,9 +495,9 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 		     "i" (SPRN_PMC5), "i" (SPRN_PMC6));
 
 	if (mmcr0 & MMCR0_FC)
-		freeze_limited_events(cpuhw, pmc5, pmc6);
+		freeze_limited_counters(cpuhw, pmc5, pmc6);
 	else
-		thaw_limited_events(cpuhw, pmc5, pmc6);
+		thaw_limited_counters(cpuhw, pmc5, pmc6);
 
 	/*
 	 * Write the full MMCR0 including the event overflow interrupt
@@ -653,7 +653,7 @@ void hw_perf_enable(void)
 			continue;
 		idx = hwc_index[i] + 1;
 		if (is_limited_pmc(idx)) {
-			cpuhw->limited_event[n_lim] = event;
+			cpuhw->limited_counter[n_lim] = event;
 			cpuhw->limited_hwidx[n_lim] = idx;
 			++n_lim;
 			continue;
@@ -702,7 +702,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		flags[n] = group->hw.event_base;
 		events[n++] = group->hw.config;
 	}
-	list_for_each_entry(event, &group->sibling_list, list_entry) {
+	list_for_each_entry(event, &group->sibling_list, group_entry) {
 		if (!is_software_event(event) &&
 		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
@@ -742,7 +742,7 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 		return 0;
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 	n0 = cpuhw->n_events;
-	n = collect_events(group_leader, ppmu->n_event - n0,
+	n = collect_events(group_leader, ppmu->n_counter - n0,
 			   &cpuhw->event[n0], &cpuhw->events[n0],
 			   &cpuhw->flags[n0]);
 	if (n < 0)
@@ -764,7 +764,7 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 	cpuctx->active_oncpu += n;
 	n = 1;
 	event_sched_in(group_leader, cpu);
-	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
+	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
 		if (sub->state != PERF_EVENT_STATE_OFF) {
 			event_sched_in(sub, cpu);
 			++n;
@@ -797,7 +797,7 @@ static int power_pmu_enable(struct perf_event *event)
 	 */
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 	n0 = cpuhw->n_events;
-	if (n0 >= ppmu->n_event)
+	if (n0 >= ppmu->n_counter)
 		goto out;
 	cpuhw->event[n0] = event;
 	cpuhw->events[n0] = event->hw.config;
@@ -848,11 +848,11 @@ static void power_pmu_disable(struct perf_event *event)
 		}
 	}
 	for (i = 0; i < cpuhw->n_limited; ++i)
-		if (event == cpuhw->limited_event[i])
+		if (event == cpuhw->limited_counter[i])
 			break;
 	if (i < cpuhw->n_limited) {
 		while (++i < cpuhw->n_limited) {
-			cpuhw->limited_event[i - 1] = cpuhw->limited_event[i];
+			cpuhw->limited_counter[i - 1] = cpuhw->limited_counter[i];
 			cpuhw->limited_hwidx[i - 1] = cpuhw->limited_hwidx[i];
 		}
 		--cpuhw->n_limited;
@@ -1078,7 +1078,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	 */
 	n = 0;
 	if (event->group_leader != event) {
-		n = collect_events(event->group_leader, ppmu->n_event - 1,
+		n = collect_events(event->group_leader, ppmu->n_counter - 1,
 				   ctrs, events, cflags);
 		if (n < 0)
 			return ERR_PTR(-EINVAL);
@@ -1230,7 +1230,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
 	int nmi;
 
 	if (cpuhw->n_limited)
-		freeze_limited_events(cpuhw, mfspr(SPRN_PMC5),
+		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
 				      mfspr(SPRN_PMC6));
 
 	perf_read_regs(regs);
@@ -1260,7 +1260,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
 	 * Any that we processed in the previous loop will not be negative.
 	 */
 	if (!found) {
-		for (i = 0; i < ppmu->n_event; ++i) {
+		for (i = 0; i < ppmu->n_counter; ++i) {
 			if (is_limited_pmc(i + 1))
 				continue;
 			val = read_pmc(i + 1);