Commit dfc6509

Author: Ingo Molnar (author and committer)
perf_counter: Rename 'event' to event_id/hw_event
In preparation for the renames, to avoid a namespace clash.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
1 parent 65abc86 commit dfc6509
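
For context on the clash being avoided (this is editorial background, not part of the commit): the follow-up renames presumably turn struct perf_counter into struct perf_event, at which point the conventional pointer name in these functions becomes 'event', and a local index or parameter also called 'event' would collide with it. A minimal, purely illustrative sketch under that assumption (example_map and example_event_map are hypothetical names; the kernel-style u64 typedef is assumed):

/* Illustrative only -- not from this patch. */
struct perf_event;                        /* the type expected after the later rename */
extern const u64 example_event_map[];     /* hypothetical lookup table */

static u64 example_map(struct perf_event *event, int config)
{
	u64 hw_event = example_event_map[config];  /* renamed local: no clash with 'event' */

	(void)event;                               /* the pointer keeps the short name */
	return hw_event;
}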

2 files changed, 37 insertions(+), 37 deletions(-)


arch/x86/kernel/cpu/perf_counter.c

Lines changed: 24 additions & 24 deletions
@@ -124,9 +124,9 @@ static const u64 p6_perfmon_event_map[] =
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
 };
 
-static u64 p6_pmu_event_map(int event)
+static u64 p6_pmu_event_map(int hw_event)
 {
-	return p6_perfmon_event_map[event];
+	return p6_perfmon_event_map[hw_event];
 }
 
 /*
@@ -137,7 +137,7 @@ static u64 p6_pmu_event_map(int event)
  */
 #define P6_NOP_COUNTER			0x0000002EULL
 
-static u64 p6_pmu_raw_event(u64 event)
+static u64 p6_pmu_raw_event(u64 hw_event)
 {
 #define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
 #define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
@@ -152,7 +152,7 @@ static u64 p6_pmu_raw_event(u64 event)
 	 P6_EVNTSEL_INV_MASK	|	\
 	 P6_EVNTSEL_COUNTER_MASK)
 
-	return event & P6_EVNTSEL_MASK;
+	return hw_event & P6_EVNTSEL_MASK;
 }
 
 
@@ -170,16 +170,16 @@ static const u64 intel_perfmon_event_map[] =
   [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
 };
 
-static u64 intel_pmu_event_map(int event)
+static u64 intel_pmu_event_map(int hw_event)
 {
-	return intel_perfmon_event_map[event];
+	return intel_perfmon_event_map[hw_event];
 }
 
 /*
- * Generalized hw caching related event table, filled
+ * Generalized hw caching related hw_event table, filled
  * in on a per model basis. A value of 0 means
- * 'not supported', -1 means 'event makes no sense on
- * this CPU', any other value means the raw event
+ * 'not supported', -1 means 'hw_event makes no sense on
+ * this CPU', any other value means the raw hw_event
  * ID.
  */
 
@@ -463,7 +463,7 @@ static const u64 atom_hw_cache_event_ids
  },
 };
 
-static u64 intel_pmu_raw_event(u64 event)
+static u64 intel_pmu_raw_event(u64 hw_event)
 {
 #define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
 #define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
@@ -478,7 +478,7 @@ static u64 intel_pmu_raw_event(u64 event)
 	 CORE_EVNTSEL_INV_MASK	|	\
 	 CORE_EVNTSEL_COUNTER_MASK)
 
-	return event & CORE_EVNTSEL_MASK;
+	return hw_event & CORE_EVNTSEL_MASK;
 }
 
 static const u64 amd_hw_cache_event_ids
@@ -585,12 +585,12 @@ static const u64 amd_perfmon_event_map[] =
   [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
 };
 
-static u64 amd_pmu_event_map(int event)
+static u64 amd_pmu_event_map(int hw_event)
 {
-	return amd_perfmon_event_map[event];
+	return amd_perfmon_event_map[hw_event];
 }
 
-static u64 amd_pmu_raw_event(u64 event)
+static u64 amd_pmu_raw_event(u64 hw_event)
 {
 #define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
 #define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
@@ -605,7 +605,7 @@ static u64 amd_pmu_raw_event(u64 event)
 	 K7_EVNTSEL_INV_MASK	|	\
 	 K7_EVNTSEL_COUNTER_MASK)
 
-	return event & K7_EVNTSEL_MASK;
+	return hw_event & K7_EVNTSEL_MASK;
 }
 
 /*
@@ -956,7 +956,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	}
 
 	/*
-	 * Raw event type provide the config in the event structure
+	 * Raw hw_event type provide the config in the hw_event structure
 	 */
 	if (attr->type == PERF_TYPE_RAW) {
 		hwc->config |= x86_pmu.raw_event(attr->config);
@@ -1245,7 +1245,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 		ret = 1;
 	}
 	/*
-	 * Quirk: certain CPUs dont like it if just 1 event is left:
+	 * Quirk: certain CPUs dont like it if just 1 hw_event is left:
 	 */
 	if (unlikely(left < 2))
 		left = 2;
@@ -1337,23 +1337,23 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 static int
 fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 {
-	unsigned int event;
+	unsigned int hw_event;
 
-	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+	hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
-	if (unlikely((event ==
+	if (unlikely((hw_event ==
 		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
 		     (hwc->sample_period == 1)))
 		return X86_PMC_IDX_FIXED_BTS;
 
 	if (!x86_pmu.num_counters_fixed)
 		return -1;
 
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
 		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
 		return X86_PMC_IDX_FIXED_CPU_CYCLES;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
 		return X86_PMC_IDX_FIXED_BUS_CYCLES;
 
 	return -1;
@@ -1970,7 +1970,7 @@ static int intel_pmu_init(void)
 
 	/*
 	 * Check whether the Architectural PerfMon supports
-	 * Branch Misses Retired Event or not.
+	 * Branch Misses Retired hw_event or not.
	 */
 	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
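
For readers unfamiliar with these helpers: each *_pmu_raw_event() takes a user-supplied raw config value and masks it down to the EVNTSEL bits the PMU actually honours, which is why only the parameter name changes in this patch. A minimal standalone sketch of that masking idea, using made-up mask values rather than the real P6/Core/K7 constants:

#include <stdint.h>

/* Illustrative masks only; the real constants live in perf_counter.c. */
#define EXAMPLE_EVNTSEL_EVENT_MASK	0x000000FFULL
#define EXAMPLE_EVNTSEL_UNIT_MASK	0x0000FF00ULL

static uint64_t example_pmu_raw_event(uint64_t hw_event)
{
	/* Keep only the event-select and unit-mask fields; drop everything else. */
	return hw_event & (EXAMPLE_EVNTSEL_EVENT_MASK | EXAMPLE_EVNTSEL_UNIT_MASK);
}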

kernel/perf_counter.c

Lines changed: 13 additions & 13 deletions
@@ -3044,22 +3044,22 @@ perf_counter_read_event(struct perf_counter *counter,
 			struct task_struct *task)
 {
 	struct perf_output_handle handle;
-	struct perf_read_event event = {
+	struct perf_read_event read_event = {
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) + perf_counter_read_size(counter),
+			.size = sizeof(read_event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
 	};
 	int ret;
 
-	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+	ret = perf_output_begin(&handle, counter, read_event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_put(&handle, event);
+	perf_output_put(&handle, read_event);
 	perf_output_read(&handle, counter);
 
 	perf_output_end(&handle);
@@ -3698,14 +3698,14 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
 
 static int perf_swcounter_match(struct perf_counter *counter,
 				enum perf_type_id type,
-				u32 event, struct pt_regs *regs)
+				u32 event_id, struct pt_regs *regs)
 {
 	if (!perf_swcounter_is_counting(counter))
 		return 0;
 
 	if (counter->attr.type != type)
 		return 0;
-	if (counter->attr.config != event)
+	if (counter->attr.config != event_id)
 		return 0;
 
 	if (regs) {
@@ -3721,7 +3721,7 @@ static int perf_swcounter_match(struct perf_counter *counter,
 
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 				     enum perf_type_id type,
-				     u32 event, u64 nr, int nmi,
+				     u32 event_id, u64 nr, int nmi,
 				     struct perf_sample_data *data,
 				     struct pt_regs *regs)
 {
@@ -3732,7 +3732,7 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_swcounter_match(counter, type, event, regs))
+		if (perf_swcounter_match(counter, type, event_id, regs))
 			perf_swcounter_add(counter, nr, nmi, data, regs);
 	}
 	rcu_read_unlock();
@@ -4036,17 +4036,17 @@ atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_counter_destroy(struct perf_counter *counter)
 {
-	u64 event = counter->attr.config;
+	u64 event_id = counter->attr.config;
 
 	WARN_ON(counter->parent);
 
-	atomic_dec(&perf_swcounter_enabled[event]);
+	atomic_dec(&perf_swcounter_enabled[event_id]);
 }
 
 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 {
 	const struct pmu *pmu = NULL;
-	u64 event = counter->attr.config;
+	u64 event_id = counter->attr.config;
 
 	/*
 	 * Software counters (currently) can't in general distinguish
@@ -4055,7 +4055,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	 * to be kernel events, and page faults are never hypervisor
 	 * events.
 	 */
-	switch (event) {
+	switch (event_id) {
 	case PERF_COUNT_SW_CPU_CLOCK:
 		pmu = &perf_ops_cpu_clock;
 
@@ -4077,7 +4077,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_SW_CONTEXT_SWITCHES:
 	case PERF_COUNT_SW_CPU_MIGRATIONS:
 		if (!counter->parent) {
-			atomic_inc(&perf_swcounter_enabled[event]);
+			atomic_inc(&perf_swcounter_enabled[event_id]);
 			counter->destroy = sw_perf_counter_destroy;
 		}
 		pmu = &perf_ops_generic;
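
As a side note on the code being touched here: perf_swcounter_match() reports a hit only when both the PERF_TYPE_* value and attr.config agree with the software event being counted, which is exactly the comparison the renamed event_id parameter feeds. A standalone sketch of that check, with a simplified stand-in struct instead of the real struct perf_counter:

#include <stdint.h>

/* Simplified stand-in for the attr fields consulted here -- illustrative only. */
struct example_attr {
	uint32_t type;		/* e.g. PERF_TYPE_SOFTWARE */
	uint64_t config;	/* e.g. PERF_COUNT_SW_CPU_CLOCK */
};

/* Same shape as the match: type and event_id must both agree. */
static int example_swcounter_match(const struct example_attr *attr,
				   uint32_t type, uint32_t event_id)
{
	if (attr->type != type)
		return 0;
	if (attr->config != event_id)
		return 0;
	return 1;
}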
