
Commit 602cae0

Authored by Peter Zijlstra, committed by Ingo Molnar
perf/x86/intel: Delay memory deallocation until x86_pmu_dead_cpu()
intel_pmu_cpu_prepare() allocated memory for ->shared_regs among other members of struct cpu_hw_events. This memory is released in intel_pmu_cpu_dying(), which is wrong. The counterpart of the intel_pmu_cpu_prepare() callback is x86_pmu_dead_cpu().

Otherwise, if the CPU fails on the UP path between CPUHP_PERF_X86_PREPARE and CPUHP_AP_PERF_X86_STARTING, it won't release the memory but will allocate new memory on the next attempt to online the CPU (leaking the old memory).

Also, if the CPU down path fails between CPUHP_AP_PERF_X86_STARTING and CPUHP_PERF_X86_PREPARE, the CPU will go back online but never allocate the memory that was released in x86_pmu_dying_cpu().

Make the memory allocation/free symmetrical in regard to the CPU hotplug notifier by moving the deallocation to intel_pmu_cpu_dead().

This started in commit:

  a7e3ed1 ("perf: Add support for supplementary event registers")

In principle the bug was introduced in v2.6.39 (!), but it will almost certainly not backport cleanly across the big CPU hotplug rewrite between v4.7-v4.15...

[ bigeasy: Added patch description. ]
[ mingo: Added backporting guidance. ]

Reported-by: He Zhe <zhe.he@windriver.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> # With developer hat on
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> # With maintainer hat on
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@kernel.org
Cc: bp@alien8.de
Cc: hpa@zytor.com
Cc: jolsa@kernel.org
Cc: kan.liang@linux.intel.com
Cc: namhyung@kernel.org
Cc: <stable@vger.kernel.org>
Fixes: a7e3ed1 ("perf: Add support for supplementary event registers")
Link: https://lkml.kernel.org/r/20181219165350.6s3jvyxbibpvlhtq@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
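To make the symmetry argument concrete, here is a minimal, self-contained sketch of the pattern the patch restores. It is not code from this commit: the demo_* names, the use of the dynamic CPUHP_BP_PREPARE_DYN state and the "perf/demo:prepare" string are invented for illustration. The point is that whatever the prepare-stage startup callback allocates must be freed by the teardown callback of that same hotplug state (which runs once the CPU is dead, and also when a later bring-up step fails and the CPU is rolled back), not by a dying-stage callback.

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/slab.h>

/* Hypothetical per-CPU state, standing in for ->shared_regs etc. */
struct demo_pcpu {
        void *buf;
};

static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu);

/* Startup of the PREPARE state: runs before the CPU comes up. */
static int demo_prepare_cpu(unsigned int cpu)
{
        struct demo_pcpu *p = &per_cpu(demo_pcpu, cpu);

        p->buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
        return p->buf ? 0 : -ENOMEM;
}

/*
 * Teardown of the same state: runs after the CPU is dead, and also when a
 * later bring-up step fails and the hotplug core rolls the CPU back down.
 * This is the counterpart of demo_prepare_cpu(), so the free lives here.
 */
static int demo_dead_cpu(unsigned int cpu)
{
        struct demo_pcpu *p = &per_cpu(demo_pcpu, cpu);

        kfree(p->buf);
        p->buf = NULL;
        return 0;
}

static int __init demo_hotplug_init(void)
{
        int ret;

        /* One state, one startup/teardown pair: allocation and free stay
         * balanced no matter where the up or down path fails. */
        ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "perf/demo:prepare",
                                demo_prepare_cpu, demo_dead_cpu);
        return ret < 0 ? ret : 0;
}
module_init(demo_hotplug_init);
MODULE_LICENSE("GPL");

With this split, a failed partial online or offline leaves each CPU with either a live buffer or no buffer, never a leak or a double allocation, which is exactly the property the Intel PMU callbacks lost by freeing in the dying stage.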
1 parent 9e63a78 commit 602cae0


arch/x86/events/intel/core.c

Lines changed: 11 additions & 5 deletions
@@ -3558,6 +3558,14 @@ static void free_excl_cntrs(int cpu)
 }
 
 static void intel_pmu_cpu_dying(int cpu)
+{
+        fini_debug_store_on_cpu(cpu);
+
+        if (x86_pmu.counter_freezing)
+                disable_counter_freeze();
+}
+
+static void intel_pmu_cpu_dead(int cpu)
 {
         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
         struct intel_shared_regs *pc;
@@ -3570,11 +3578,6 @@ static void intel_pmu_cpu_dying(int cpu)
         }
 
         free_excl_cntrs(cpu);
-
-        fini_debug_store_on_cpu(cpu);
-
-        if (x86_pmu.counter_freezing)
-                disable_counter_freeze();
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3663,6 +3666,7 @@ static __initconst const struct x86_pmu core_pmu = {
         .cpu_prepare            = intel_pmu_cpu_prepare,
         .cpu_starting           = intel_pmu_cpu_starting,
         .cpu_dying              = intel_pmu_cpu_dying,
+        .cpu_dead               = intel_pmu_cpu_dead,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3703,6 +3707,8 @@ static __initconst const struct x86_pmu intel_pmu = {
         .cpu_prepare            = intel_pmu_cpu_prepare,
         .cpu_starting           = intel_pmu_cpu_starting,
         .cpu_dying              = intel_pmu_cpu_dying,
+        .cpu_dead               = intel_pmu_cpu_dead,
+
         .guest_get_msrs         = intel_guest_get_msrs,
         .sched_task             = intel_pmu_sched_task,
 };
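Read together, the hunks leave the two callbacks looking roughly as follows. This is assembled from the diff above; the middle of intel_pmu_cpu_dead() falls between the two hunks and is not shown, so it is only summarized in a comment based on the commit description.

static void intel_pmu_cpu_dying(int cpu)
{
        fini_debug_store_on_cpu(cpu);

        if (x86_pmu.counter_freezing)
                disable_counter_freeze();
}

static void intel_pmu_cpu_dead(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct intel_shared_regs *pc;

        /*
         * Lines between the hunks are not shown in the diff.  Per the
         * commit message, this is where cpuc->shared_regs (allocated by
         * intel_pmu_cpu_prepare()) is released.
         */

        free_excl_cntrs(cpu);
}

The dying callback keeps only the work that is safe and sensible while the CPU is still going down (DS area and counter-freeze teardown), while everything that pairs with the prepare-stage allocations moves to the dead callback, matching the CPUHP_PERF_X86_PREPARE startup/teardown pairing described in the commit message.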
