Skip to content

Commit d01b1f9

Browse files
Peter Zijlstra (Intel) authored and KAGA-KOKO committed
perf/x86/intel: Make cpuc allocations consistent
The cpuc data structure allocation is different between fake and real cpuc's; use the same code to init/free both.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 parent 1c163f4 commit d01b1f9

File tree

3 files changed

+31
-22
lines changed

3 files changed

+31
-22
lines changed

arch/x86/events/core.c

Lines changed: 5 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
19951995
*/
19961996
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
19971997
{
1998-
kfree(cpuc->shared_regs);
1998+
intel_cpuc_finish(cpuc);
19991999
kfree(cpuc);
20002000
}
20012001

@@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
20072007
cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
20082008
if (!cpuc)
20092009
return ERR_PTR(-ENOMEM);
2010-
2011-
/* only needed, if we have extra_regs */
2012-
if (x86_pmu.extra_regs) {
2013-
cpuc->shared_regs = allocate_shared_regs(cpu);
2014-
if (!cpuc->shared_regs)
2015-
goto error;
2016-
}
20172010
cpuc->is_fake = 1;
2011+
2012+
if (intel_cpuc_prepare(cpuc, cpu))
2013+
goto error;
2014+
20182015
return cpuc;
20192016
error:
20202017
free_fake_cpuc(cpuc);

arch/x86/events/intel/core.c

Lines changed: 18 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -3398,7 +3398,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
33983398
return x86_event_sysfs_show(page, config, event);
33993399
}
34003400

3401-
struct intel_shared_regs *allocate_shared_regs(int cpu)
3401+
static struct intel_shared_regs *allocate_shared_regs(int cpu)
34023402
{
34033403
struct intel_shared_regs *regs;
34043404
int i;
@@ -3430,10 +3430,9 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
34303430
return c;
34313431
}
34323432

3433-
static int intel_pmu_cpu_prepare(int cpu)
3434-
{
3435-
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
34363433

3434+
int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
3435+
{
34373436
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
34383437
cpuc->shared_regs = allocate_shared_regs(cpu);
34393438
if (!cpuc->shared_regs)
@@ -3443,7 +3442,7 @@ static int intel_pmu_cpu_prepare(int cpu)
34433442
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
34443443
size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
34453444

3446-
cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
3445+
cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
34473446
if (!cpuc->constraint_list)
34483447
goto err_shared_regs;
34493448

@@ -3468,6 +3467,11 @@ static int intel_pmu_cpu_prepare(int cpu)
34683467
return -ENOMEM;
34693468
}
34703469

3470+
static int intel_pmu_cpu_prepare(int cpu)
3471+
{
3472+
return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
3473+
}
3474+
34713475
static void flip_smm_bit(void *data)
34723476
{
34733477
unsigned long set = *(unsigned long *)data;
@@ -3542,9 +3546,8 @@ static void intel_pmu_cpu_starting(int cpu)
35423546
}
35433547
}
35443548

3545-
static void free_excl_cntrs(int cpu)
3549+
static void free_excl_cntrs(struct cpu_hw_events *cpuc)
35463550
{
3547-
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
35483551
struct intel_excl_cntrs *c;
35493552

35503553
c = cpuc->excl_cntrs;
@@ -3565,9 +3568,8 @@ static void intel_pmu_cpu_dying(int cpu)
35653568
disable_counter_freeze();
35663569
}
35673570

3568-
static void intel_pmu_cpu_dead(int cpu)
3571+
void intel_cpuc_finish(struct cpu_hw_events *cpuc)
35693572
{
3570-
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
35713573
struct intel_shared_regs *pc;
35723574

35733575
pc = cpuc->shared_regs;
@@ -3577,7 +3579,12 @@ static void intel_pmu_cpu_dead(int cpu)
35773579
cpuc->shared_regs = NULL;
35783580
}
35793581

3580-
free_excl_cntrs(cpu);
3582+
free_excl_cntrs(cpuc);
3583+
}
3584+
3585+
static void intel_pmu_cpu_dead(int cpu)
3586+
{
3587+
intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
35813588
}
35823589

35833590
static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -4715,7 +4722,7 @@ static __init int fixup_ht_bug(void)
47154722
hardlockup_detector_perf_restart();
47164723

47174724
for_each_online_cpu(c)
4718-
free_excl_cntrs(c);
4725+
free_excl_cntrs(&per_cpu(cpu_hw_events, c));
47194726

47204727
cpus_read_unlock();
47214728
pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");

arch/x86/events/perf_event.h

Lines changed: 8 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -889,7 +889,8 @@ struct event_constraint *
889889
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
890890
struct perf_event *event);
891891

892-
struct intel_shared_regs *allocate_shared_regs(int cpu);
892+
extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
893+
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
893894

894895
int intel_pmu_init(void);
895896

@@ -1025,9 +1026,13 @@ static inline int intel_pmu_init(void)
10251026
return 0;
10261027
}
10271028

1028-
static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
1029+
static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
1030+
{
1031+
return 0;
1032+
}
1033+
1034+
static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
10291035
{
1030-
return NULL;
10311036
}
10321037

10331038
static inline int is_ht_workaround_enabled(void)

0 commit comments

Comments (0)