Commit 81ec3f3

Jiri Olsa authored and Ingo Molnar committed
perf/x86: Add check_period PMU callback
Vince (and later on Ravi) reported crashes in the BTS code during fuzzing with the following backtrace:

  general protection fault: 0000 [#1] SMP PTI
  ...
  RIP: 0010:perf_prepare_sample+0x8f/0x510
  ...
  Call Trace:
   <IRQ>
   ? intel_pmu_drain_bts_buffer+0x194/0x230
   intel_pmu_drain_bts_buffer+0x160/0x230
   ? tick_nohz_irq_exit+0x31/0x40
   ? smp_call_function_single_interrupt+0x48/0xe0
   ? call_function_single_interrupt+0xf/0x20
   ? call_function_single_interrupt+0xa/0x20
   ? x86_schedule_events+0x1a0/0x2f0
   ? x86_pmu_commit_txn+0xb4/0x100
   ? find_busiest_group+0x47/0x5d0
   ? perf_event_set_state.part.42+0x12/0x50
   ? perf_mux_hrtimer_restart+0x40/0xb0
   intel_pmu_disable_event+0xae/0x100
   ? intel_pmu_disable_event+0xae/0x100
   x86_pmu_stop+0x7a/0xb0
   x86_pmu_del+0x57/0x120
   event_sched_out.isra.101+0x83/0x180
   group_sched_out.part.103+0x57/0xe0
   ctx_sched_out+0x188/0x240
   ctx_resched+0xa8/0xd0
   __perf_event_enable+0x193/0x1e0
   event_function+0x8e/0xc0
   remote_function+0x41/0x50
   flush_smp_call_function_queue+0x68/0x100
   generic_smp_call_function_single_interrupt+0x13/0x30
   smp_call_function_single_interrupt+0x3e/0xe0
   call_function_single_interrupt+0xf/0x20
   </IRQ>

The reason is that while the event init code does several checks for BTS events and prevents several unwanted config bits for BTS events (like precise_ip), the PERF_EVENT_IOC_PERIOD ioctl allows a BTS event to be created without those checks being done.

The following sequence will cause the crash: if we create an 'almost' BTS event with precise_ip and callchains, and then use PERF_EVENT_IOC_PERIOD to turn it into a BTS event, it will crash the perf_prepare_sample() function, because precise_ip events are expected to come in with callchain data initialized, but that's not the case for the intel_pmu_drain_bts_buffer() caller.

Add a check_period callback that is called before the period is changed via PERF_EVENT_IOC_PERIOD; it denies the change if the event would become a BTS event. Add the limit_period check there as well.

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: <stable@vger.kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20190204123532.GA4794@krava
Signed-off-by: Ingo Molnar <mingo@kernel.org>
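For illustration (this sketch is not part of the commit), the triggering sequence can be written as a short userspace program against the perf_event_open()/ioctl() ABI. It assumes an x86 CPU with BTS support; the exact attr values are only illustrative:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t period = 1;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
	attr.sample_period = 2;			/* != 1, so not a BTS event at init time */
	attr.sample_type = PERF_SAMPLE_CALLCHAIN;
	attr.precise_ip = 2;			/* would be rejected for a BTS event */

	/* perf_event_open() has no glibc wrapper; current task, any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Changing the period to 1 would silently turn this into a BTS event. */
	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period) < 0)
		perror("PERF_EVENT_IOC_PERIOD");

	close(fd);
	return 0;
}

Before the patch, the ioctl succeeds, the event silently becomes a BTS event, and a later intel_pmu_drain_bts_buffer() run trips over the uninitialized callchain data; with the patch, the ioctl is denied with -EINVAL.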
1 parent d139371 commit 81ec3f3

File tree

5 files changed: +58 -2 lines


arch/x86/events/core.c

Lines changed: 14 additions & 0 deletions
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
 		x86_pmu.check_microcode();
 }
 
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
+{
+	if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+		return -EINVAL;
+
+	if (value && x86_pmu.limit_period) {
+		if (x86_pmu.limit_period(event, value) > value)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct pmu pmu = {
 	.pmu_enable		= x86_pmu_enable,
 	.pmu_disable		= x86_pmu_disable,
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
 	.event_idx		= x86_pmu_event_idx,
 	.sched_task		= x86_pmu_sched_task,
 	.task_ctx_size		= sizeof(struct x86_perf_task_context),
+	.check_period		= x86_pmu_check_period,
 };
 
 void arch_perf_update_userpage(struct perf_event *event,

arch/x86/events/intel/core.c

Lines changed: 9 additions & 0 deletions
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
 	intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = {
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
 	.cpu_dead		= intel_pmu_cpu_dead,
+
+	.check_period		= intel_pmu_check_period,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 
 	.guest_get_msrs		= intel_guest_get_msrs,
 	.sched_task		= intel_pmu_sched_task,
+
+	.check_period		= intel_pmu_check_period,
 };
 
 static __init void intel_clovertown_quirk(void)

arch/x86/events/perf_event.h

Lines changed: 14 additions & 2 deletions
@@ -646,6 +646,11 @@ struct x86_pmu {
 	 * Intel host/guest support (KVM)
 	 */
 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int		(*check_period) (struct perf_event *event, u64 period);
 };
 
 struct x86_perf_task_context {
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
-static inline bool intel_pmu_has_bts(struct perf_event *event)
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned int hw_event, bts_event;
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-	return hw_event == bts_event && hwc->sample_period == 1;
+	return hw_event == bts_event && period == 1;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	return intel_pmu_has_bts_period(event, hwc->sample_period);
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);

include/linux/perf_event.h

Lines changed: 5 additions & 0 deletions
@@ -447,6 +447,11 @@ struct pmu {
 	 * Filter events for PMU-specific reasons.
 	 */
 	int (*filter_match)		(struct perf_event *event); /* optional */
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
 };
 
 enum perf_addr_filter_action_t {
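The callback is optional: perf_pmu_register() installs a nop for PMUs that do not provide it (see the kernel/events/core.c hunk below), so only drivers with period constraints need to implement it. As a minimal sketch, using hypothetical driver names not taken from this commit, a PMU could use the hook to reject periods its hardware cannot sustain:

#include <linux/perf_event.h>

/* Hypothetical driver, for illustration only -- not part of this commit. */
#define MYDRV_MIN_PERIOD	128	/* assumed hardware minimum */

static int mydrv_check_period(struct perf_event *event, u64 value)
{
	/* Reject non-zero periods below the assumed hardware minimum. */
	if (value && value < MYDRV_MIN_PERIOD)
		return -EINVAL;

	return 0;
}

static struct pmu mydrv_pmu = {
	/* ... other callbacks ... */
	.check_period	= mydrv_check_period,	/* optional */
};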

kernel/events/core.c

Lines changed: 16 additions & 0 deletions
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
 	}
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+	return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
+	if (perf_event_check_period(event, value))
+		return -EINVAL;
+
 	event_function_call(event, __perf_event_period, &value);
 
 	return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+	return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->check_period)
+		pmu->check_period = perf_event_nop_int;
+
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;
 