Skip to content

Commit bfe3349

Browse files
Peter Zijlstra authored and Ingo Molnar committed
perf/x86: Fix RDPMC vs. mm_struct tracking
Vince reported the following rdpmc() testcase failure: > Failing test case: > > fd=perf_event_open(); > addr=mmap(fd); > exec() // without closing or unmapping the event > fd=perf_event_open(); > addr=mmap(fd); > rdpmc() // GPFs due to rdpmc being disabled The problem is of course that exec() plays tricks with what is current->mm, only destroying the old mappings after having installed the new mm. Fix this confusion by passing along vma->vm_mm instead of relying on current->mm. Reported-by: Vince Weaver <vincent.weaver@maine.edu> Tested-by: Vince Weaver <vincent.weaver@maine.edu> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Andy Lutomirski <luto@kernel.org> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@gmail.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: stable@vger.kernel.org Fixes: 1e0fb9e ("perf: Add pmu callbacks to track event mapping and unmapping") Link: http://lkml.kernel.org/r/20170802173930.cstykcqefmqt7jau@hirez.programming.kicks-ass.net [ Minor cleanups. ] Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 8d31f80 commit bfe3349

File tree

3 files changed

+12
-14
lines changed

3 files changed

+12
-14
lines changed

arch/x86/events/core.c

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -2114,7 +2114,7 @@ static void refresh_pce(void *ignored)
21142114
load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
21152115
}
21162116

2117-
static void x86_pmu_event_mapped(struct perf_event *event)
2117+
static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
21182118
{
21192119
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
21202120
return;
@@ -2129,22 +2129,20 @@ static void x86_pmu_event_mapped(struct perf_event *event)
21292129
* For now, this can't happen because all callers hold mmap_sem
21302130
* for write. If this changes, we'll need a different solution.
21312131
*/
2132-
lockdep_assert_held_exclusive(&current->mm->mmap_sem);
2132+
lockdep_assert_held_exclusive(&mm->mmap_sem);
21332133

2134-
if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
2135-
on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2134+
if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
2135+
on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
21362136
}
21372137

2138-
static void x86_pmu_event_unmapped(struct perf_event *event)
2138+
static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
21392139
{
2140-
if (!current->mm)
2141-
return;
21422140

21432141
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
21442142
return;
21452143

2146-
if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
2147-
on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2144+
if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
2145+
on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
21482146
}
21492147

21502148
static int x86_pmu_event_idx(struct perf_event *event)

include/linux/perf_event.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -310,8 +310,8 @@ struct pmu {
310310
* Notification that the event was mapped or unmapped. Called
311311
* in the context of the mapping task.
312312
*/
313-
void (*event_mapped) (struct perf_event *event); /*optional*/
314-
void (*event_unmapped) (struct perf_event *event); /*optional*/
313+
void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
314+
void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
315315

316316
/*
317317
* Flags for ->add()/->del()/ ->start()/->stop(). There are

kernel/events/core.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5090,7 +5090,7 @@ static void perf_mmap_open(struct vm_area_struct *vma)
50905090
atomic_inc(&event->rb->aux_mmap_count);
50915091

50925092
if (event->pmu->event_mapped)
5093-
event->pmu->event_mapped(event);
5093+
event->pmu->event_mapped(event, vma->vm_mm);
50945094
}
50955095

50965096
static void perf_pmu_output_stop(struct perf_event *event);
@@ -5113,7 +5113,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
51135113
unsigned long size = perf_data_size(rb);
51145114

51155115
if (event->pmu->event_unmapped)
5116-
event->pmu->event_unmapped(event);
5116+
event->pmu->event_unmapped(event, vma->vm_mm);
51175117

51185118
/*
51195119
* rb->aux_mmap_count will always drop before rb->mmap_count and
@@ -5411,7 +5411,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
54115411
vma->vm_ops = &perf_mmap_vmops;
54125412

54135413
if (event->pmu->event_mapped)
5414-
event->pmu->event_mapped(event);
5414+
event->pmu->event_mapped(event, vma->vm_mm);
54155415

54165416
return ret;
54175417
}

0 commit comments

Comments
 (0)