Skip to content

Commit 57c0c15

Browse files
author
Ingo Molnar
committed
perf: Tidy up after the big rename
- provide compatibility Kconfig entry for existing PERF_COUNTERS .config's
- provide courtesy copy of old perf_counter.h, for user-space projects
- small indentation fixups
- fix up MAINTAINERS
- fix small x86 printout fallout
- fix up small PowerPC comment fallout (use 'counter' as in register)

Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
1 parent cdd6c48 commit 57c0c15

File tree

8 files changed

+534
-76
lines changed

8 files changed

+534
-76
lines changed

MAINTAINERS

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4000,7 +4000,7 @@ S: Maintained
40004000
F: include/linux/delayacct.h
40014001
F: kernel/delayacct.c
40024002

4003-
PERFORMANCE COUNTER SUBSYSTEM
4003+
PERFORMANCE EVENTS SUBSYSTEM
40044004
M: Peter Zijlstra <a.p.zijlstra@chello.nl>
40054005
M: Paul Mackerras <paulus@samba.org>
40064006
M: Ingo Molnar <mingo@elte.hu>

arch/powerpc/include/asm/paca.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@ struct paca_struct {
122122
u8 soft_enabled; /* irq soft-enable flag */
123123
u8 hard_enabled; /* set if irqs are enabled in MSR */
124124
u8 io_sync; /* writel() needs spin_unlock sync */
125-
u8 perf_event_pending; /* PM interrupt while soft-disabled */
125+
u8 perf_event_pending; /* PM interrupt while soft-disabled */
126126

127127
/* Stuff for accurate time accounting */
128128
u64 user_time; /* accumulated usermode TB ticks */

arch/powerpc/kernel/perf_event.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
4141
struct power_pmu *ppmu;
4242

4343
/*
44-
* Normally, to ignore kernel events we set the FCS (freeze events
44+
* Normally, to ignore kernel events we set the FCS (freeze counters
4545
* in supervisor mode) bit in MMCR0, but if the kernel runs with the
4646
* hypervisor bit set in the MSR, or if we are running on a processor
4747
* where the hypervisor bit is forced to 1 (as on Apple G5 processors),
@@ -159,7 +159,7 @@ void perf_event_print_debug(void)
159159
}
160160

161161
/*
162-
* Read one performance monitor event (PMC).
162+
* Read one performance monitor counter (PMC).
163163
*/
164164
static unsigned long read_pmc(int idx)
165165
{
@@ -409,7 +409,7 @@ static void power_pmu_read(struct perf_event *event)
409409
val = read_pmc(event->hw.idx);
410410
} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
411411

412-
/* The events are only 32 bits wide */
412+
/* The counters are only 32 bits wide */
413413
delta = (val - prev) & 0xfffffffful;
414414
atomic64_add(delta, &event->count);
415415
atomic64_sub(delta, &event->hw.period_left);
@@ -543,7 +543,7 @@ void hw_perf_disable(void)
543543
}
544544

545545
/*
546-
* Set the 'freeze events' bit.
546+
* Set the 'freeze counters' bit.
547547
* The barrier is to make sure the mtspr has been
548548
* executed and the PMU has frozen the events
549549
* before we return.
@@ -1124,7 +1124,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
11241124
}
11251125

11261126
/*
1127-
* A event has overflowed; update its count and record
1127+
* A counter has overflowed; update its count and record
11281128
* things if requested. Note that interrupts are hard-disabled
11291129
* here so there is no possibility of being interrupted.
11301130
*/
@@ -1271,7 +1271,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
12711271

12721272
/*
12731273
* Reset MMCR0 to its normal value. This will set PMXE and
1274-
* clear FC (freeze events) and PMAO (perf mon alert occurred)
1274+
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
12751275
* and thus allow interrupts to occur again.
12761276
* XXX might want to use MSR.PM to keep the events frozen until
12771277
* we get back out of this interrupt.

arch/x86/kernel/cpu/perf_event.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2081,13 +2081,13 @@ void __init init_hw_perf_events(void)
20812081
perf_events_lapic_init();
20822082
register_die_notifier(&perf_event_nmi_notifier);
20832083

2084-
pr_info("... version: %d\n", x86_pmu.version);
2085-
pr_info("... bit width: %d\n", x86_pmu.event_bits);
2086-
pr_info("... generic events: %d\n", x86_pmu.num_events);
2087-
pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2088-
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2089-
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2090-
pr_info("... event mask: %016Lx\n", perf_event_mask);
2084+
pr_info("... version: %d\n", x86_pmu.version);
2085+
pr_info("... bit width: %d\n", x86_pmu.event_bits);
2086+
pr_info("... generic registers: %d\n", x86_pmu.num_events);
2087+
pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2088+
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2089+
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2090+
pr_info("... event mask: %016Lx\n", perf_event_mask);
20912091
}
20922092

20932093
static inline void x86_pmu_read(struct perf_event *event)

0 commit comments

Comments (0)