@@ -124,9 +124,9 @@ static const u64 p6_perfmon_event_map[] =
 	[PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
 };

-static u64 p6_pmu_event_map(int event)
+static u64 p6_pmu_event_map(int hw_event)
 {
-	return p6_perfmon_event_map[event];
+	return p6_perfmon_event_map[hw_event];
 }

 /*
@@ -137,7 +137,7 @@ static u64 p6_pmu_event_map(int event)
  */
 #define P6_NOP_COUNTER			0x0000002EULL

-static u64 p6_pmu_raw_event(u64 event)
+static u64 p6_pmu_raw_event(u64 hw_event)
 {
 #define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
 #define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
@@ -152,7 +152,7 @@ static u64 p6_pmu_raw_event(u64 event)
 	 P6_EVNTSEL_INV_MASK |		\
 	 P6_EVNTSEL_COUNTER_MASK)

-	return event & P6_EVNTSEL_MASK;
+	return hw_event & P6_EVNTSEL_MASK;
 }

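Aside: the three P6 hunks above only rename the parameter; the underlying logic is a plain bitmask sanitizer. The standalone sketch below shows that pattern. The EVENT and UNIT mask values come from the hunk above; the INV and COUNTER bit positions and the demo main() are illustrative assumptions, not part of this patch.

#include <stdint.h>
#include <stdio.h>

#define P6_EVNTSEL_EVENT_MASK	0x000000FFULL	/* event code, from the hunk above */
#define P6_EVNTSEL_UNIT_MASK	0x0000FF00ULL	/* unit mask, from the hunk above */
#define P6_EVNTSEL_INV_MASK	0x00800000ULL	/* assumed bit position */
#define P6_EVNTSEL_COUNTER_MASK	0xFF000000ULL	/* assumed bit position */

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK |		\
	 P6_EVNTSEL_INV_MASK |		\
	 P6_EVNTSEL_COUNTER_MASK)

/* Keep only the bits the event-select MSR actually understands. */
static uint64_t p6_pmu_raw_event(uint64_t hw_event)
{
	return hw_event & P6_EVNTSEL_MASK;
}

int main(void)
{
	/* Event 0xc2, unit mask 0x01, plus stray high bits that get stripped. */
	printf("0x%llx\n",
	       (unsigned long long)p6_pmu_raw_event(0x12340000004301c2ULL));
	return 0;
}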
@@ -170,16 +170,16 @@ static const u64 intel_perfmon_event_map[] =
 	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
 };

-static u64 intel_pmu_event_map(int event)
+static u64 intel_pmu_event_map(int hw_event)
 {
-	return intel_perfmon_event_map[event];
+	return intel_perfmon_event_map[hw_event];
 }

 /*
- * Generalized hw caching related event table, filled
+ * Generalized hw caching related hw_event table, filled
  * in on a per model basis. A value of 0 means
- * 'not supported', -1 means 'event makes no sense on
- * this CPU', any other value means the raw event
+ * 'not supported', -1 means 'hw_event makes no sense on
+ * this CPU', any other value means the raw hw_event
  * ID.
  */

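For orientation, the comment touched here describes the per-model generalized cache table. The sketch below shows the shape of such a table and its 0 / -1 convention; the dimension names, the raw IDs, and the lookup helper are simplified assumptions, not the kernel's actual definitions.

#include <stdint.h>

/* Assumed, simplified dimensions: cache level, access op, access result. */
enum cache_id     { CACHE_L1D, CACHE_L1I, CACHE_LL, CACHE_ID_MAX };
enum cache_op     { CACHE_OP_READ, CACHE_OP_WRITE, CACHE_OP_PREFETCH, CACHE_OP_MAX };
enum cache_result { CACHE_RESULT_ACCESS, CACHE_RESULT_MISS, CACHE_RESULT_MAX };

/* Per-model table: 0 = not supported, -1 = makes no sense on this CPU,
 * anything else is the raw hw_event ID to program into the PMU. */
static const uint64_t hw_cache_event_ids[CACHE_ID_MAX][CACHE_OP_MAX][CACHE_RESULT_MAX] = {
	[CACHE_L1D] = {
		[CACHE_OP_READ] = {
			[CACHE_RESULT_ACCESS] = 0x0143,		/* made-up raw ID */
			[CACHE_RESULT_MISS]   = 0x0145,		/* made-up raw ID */
		},
		[CACHE_OP_PREFETCH] = {
			[CACHE_RESULT_MISS]   = (uint64_t)-1,	/* nonsensical here */
		},
	},
};

/* Hypothetical lookup helper following the table's conventions. */
static int cache_event_config(int id, int op, int result, uint64_t *config)
{
	uint64_t val = hw_cache_event_ids[id][op][result];

	if (val == 0 || val == (uint64_t)-1)
		return -1;	/* unsupported or meaningless combination */
	*config = val;
	return 0;
}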
@@ -463,7 +463,7 @@ static const u64 atom_hw_cache_event_ids
 },
 };

-static u64 intel_pmu_raw_event(u64 event)
+static u64 intel_pmu_raw_event(u64 hw_event)
 {
 #define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
 #define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
@@ -478,7 +478,7 @@ static u64 intel_pmu_raw_event(u64 event)
 	 CORE_EVNTSEL_INV_MASK |		\
 	 CORE_EVNTSEL_COUNTER_MASK)

-	return event & CORE_EVNTSEL_MASK;
+	return hw_event & CORE_EVNTSEL_MASK;
 }

 static const u64 amd_hw_cache_event_ids
@@ -585,12 +585,12 @@ static const u64 amd_perfmon_event_map[] =
 	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
 };

-static u64 amd_pmu_event_map(int event)
+static u64 amd_pmu_event_map(int hw_event)
 {
-	return amd_perfmon_event_map[event];
+	return amd_perfmon_event_map[hw_event];
 }

-static u64 amd_pmu_raw_event(u64 event)
+static u64 amd_pmu_raw_event(u64 hw_event)
 {
 #define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
 #define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
@@ -605,7 +605,7 @@ static u64 amd_pmu_raw_event(u64 event)
 	 K7_EVNTSEL_INV_MASK |	\
 	 K7_EVNTSEL_COUNTER_MASK)

-	return event & K7_EVNTSEL_MASK;
+	return hw_event & K7_EVNTSEL_MASK;
 }

 /*
@@ -956,7 +956,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	}

 	/*
-	 * Raw event type provide the config in the event structure
+	 * Raw hw_event type provide the config in the hw_event structure
 	 */
 	if (attr->type == PERF_TYPE_RAW) {
 		hwc->config |= x86_pmu.raw_event(attr->config);
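Context for the line being changed: raw hw_events hand their config straight to the per-vendor ->raw_event() sanitizer, while generic hw_events are translated through ->event_map(). A compressed, hypothetical sketch of that dispatch follows; the struct and function names (and the mask value) are stand-ins, not the kernel's.

#include <stdint.h>

enum attr_type { TYPE_RAW, TYPE_HARDWARE };

struct counter_attr { enum attr_type type; uint64_t config; };
struct hw_config    { uint64_t config; };

/* Stand-ins for x86_pmu.raw_event() / x86_pmu.event_map(). */
static uint64_t pmu_raw_event(uint64_t hw_event)
{
	return hw_event & 0xFFFF00FFULL;			/* assumed sanitizing mask */
}

static uint64_t pmu_event_map(int hw_event)
{
	static const uint64_t map[] = { 0x003c, 0x00c0 };	/* cycles, instructions */
	return map[hw_event];
}

/* Mirrors the shape of the __hw_perf_counter_init() branch above:
 * raw configs are masked, generic indices are looked up in the map. */
static void set_config(const struct counter_attr *attr, struct hw_config *hwc)
{
	if (attr->type == TYPE_RAW)
		hwc->config |= pmu_raw_event(attr->config);
	else
		hwc->config |= pmu_event_map((int)attr->config);
}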
@@ -1245,7 +1245,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 		ret = 1;
 	}
 	/*
-	 * Quirk: certain CPUs dont like it if just 1 event is left:
+	 * Quirk: certain CPUs dont like it if just 1 hw_event is left:
 	 */
 	if (unlikely(left < 2))
 		left = 2;
@@ -1337,23 +1337,23 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 static int
 fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 {
-	unsigned int event;
+	unsigned int hw_event;

-	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+	hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;

-	if (unlikely((event ==
+	if (unlikely((hw_event ==
 		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
 		     (hwc->sample_period == 1)))
 		return X86_PMC_IDX_FIXED_BTS;

 	if (!x86_pmu.num_counters_fixed)
 		return -1;

-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
 		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
 		return X86_PMC_IDX_FIXED_CPU_CYCLES;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
 		return X86_PMC_IDX_FIXED_BUS_CYCLES;

 	return -1;
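fixed_mode_idx() above steers the three architectural events onto the fixed-function counters. The sketch below spells out that mapping as a small lookup; the event-select codes and the 0/1/2 numbering follow the Intel SDM and are stated here as assumptions (the kernel returns indices offset into its fixed-counter range rather than 0/1/2).

#include <stdint.h>
#include <stdio.h>

struct fixed_map { uint64_t hw_event; int fixed_ctr; const char *what; };

/* Architectural event-select codes (assumed per the Intel SDM). */
static const struct fixed_map fixed_counters[] = {
	{ 0x00c0, 0, "instructions retired" },
	{ 0x003c, 1, "unhalted core cycles" },
	{ 0x013c, 2, "unhalted reference (bus) cycles" },
};

/* Return the fixed counter for a masked event code, or -1 to fall back
 * to a generic counter, echoing the fall-through in fixed_mode_idx(). */
static int pick_fixed_counter(uint64_t hw_event)
{
	for (size_t i = 0; i < sizeof(fixed_counters) / sizeof(fixed_counters[0]); i++)
		if (hw_event == fixed_counters[i].hw_event)
			return fixed_counters[i].fixed_ctr;
	return -1;
}

int main(void)
{
	printf("%d\n", pick_fixed_counter(0x003c));	/* cycles -> fixed counter 1 */
	printf("%d\n", pick_fixed_counter(0x00c5));	/* branch misses -> -1 (generic) */
	return 0;
}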
@@ -1970,7 +1970,7 @@ static int intel_pmu_init(void)

 	/*
 	 * Check whether the Architectural PerfMon supports
-	 * Branch Misses Retired Event or not.
+	 * Branch Misses Retired hw_event or not.
 	 */
 	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
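The cpuid(10, ...) call above reads the architectural-PerfMon leaf 0xA: mask_length (EAX[31:24]) says how many bits of the EBX "event unavailable" vector are valid, and bit index 6 corresponds to Branch Misses Retired. Below is a userspace sketch of the same probe, assuming GCC/Clang's <cpuid.h>; the field layout is quoted from the Intel SDM, so treat it as an assumption rather than something this patch defines.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
		return 1;	/* leaf 0xA not available */

	unsigned int version           = eax & 0xff;		/* EAX[7:0]   */
	unsigned int mask_length       = (eax >> 24) & 0xff;	/* EAX[31:24] */
	unsigned int branch_misses_idx = 6;			/* assumed bit index */

	printf("arch PerfMon v%u, %u availability bits\n", version, mask_length);

	/* A set EBX bit means "event not available". */
	if (mask_length > branch_misses_idx && !(ebx & (1u << branch_misses_idx)))
		printf("Branch Misses Retired is supported\n");
	else
		printf("Branch Misses Retired is not reported as supported\n");
	return 0;
}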