Commit 283ca52

Daniel Borkmann authored and Alexei Starovoitov committed
bpf: fix corruption on concurrent perf_event_output calls
When tracing and networking programs are both attached in the system and both use event-output helpers that eventually call into perf_event_output(), we could end up in a situation where the attached tracing program runs in user context while a cls_bpf program is triggered on that same CPU out of softirq context.

Since both rely on the same per-cpu perf_sample_data, we could potentially corrupt it. This can only ever happen in a combination of the two types; all tracing programs use a bpf_prog_active counter to bail out in case a program is already running on that CPU out of a different context. XDP and cls_bpf programs by themselves don't have this issue as they only ever run in a single context. Therefore, split the two perf_sample_data so they cannot be accessed from each other.

Fixes: 20b9d7a ("bpf: avoid excessive stack usage for perf_sample_data")
Reported-by: Alexei Starovoitov <ast@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Song Liu <songliubraving@fb.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent 30791ac commit 283ca52
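
For illustration only, here is a minimal userspace C sketch of the interleaving the commit message describes: when one shared scratch buffer serves both caller classes, a "softirq" firing mid-flight clobbers the in-progress sample; with split buffers, both outputs stay intact. All names here (sample_data, net_event, output, the *-payload strings) are invented for the demo and are not kernel APIs; a plain function call stands in for softirq preemption.

#include <stdio.h>

/* Hypothetical stand-ins for this demo; none of these are kernel APIs. */
struct sample_data { const char *raw; };

static struct sample_data shared_sd;  /* pre-fix: one scratch area for everyone */
static struct sample_data trace_sd;   /* post-fix: tracing-side scratch         */
static struct sample_data misc_sd;    /* post-fix: networking-side scratch      */

static void output(struct sample_data *sd)
{
	printf("output: %s\n", sd->raw);
}

/* Networking path, as if a cls_bpf program fired out of softirq
 * on the same CPU while the tracing path was mid-flight. */
static void net_event(struct sample_data *sd)
{
	sd->raw = "net-payload";
	output(sd);
}

int main(void)
{
	/* Pre-fix: the tracing path prepares the shared buffer ... */
	shared_sd.raw = "trace-payload";
	net_event(&shared_sd);  /* ... "softirq" reuses the same buffer ...  */
	output(&shared_sd);     /* ... corrupted: prints "net-payload" twice */

	/* Post-fix: each caller class has its own per-cpu buffer. */
	trace_sd.raw = "trace-payload";
	net_event(&misc_sd);    /* "softirq" uses its own scratch area */
	output(&trace_sd);      /* intact: prints "trace-payload"      */
	return 0;
}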


kernel/trace/bpf_trace.c

Lines changed: 12 additions & 7 deletions
@@ -343,14 +343,13 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 	.arg4_type	= ARG_CONST_SIZE,
 };
 
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
 
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
-			u64 flags, struct perf_raw_record *raw)
+			u64 flags, struct perf_sample_data *sd)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
 	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
 	struct bpf_event_entry *ee;
@@ -373,15 +372,14 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 	if (unlikely(event->oncpu != cpu))
 		return -EOPNOTSUPP;
 
-	perf_sample_data_init(sd, 0, 0);
-	sd->raw = raw;
 	perf_event_output(event, sd, regs);
 	return 0;
 }
 
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 	   u64, flags, void *, data, u64, size)
 {
+	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
 	struct perf_raw_record raw = {
 		.frag = {
 			.size = size,
@@ -392,7 +390,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 		return -EINVAL;
 
-	return __bpf_perf_event_output(regs, map, flags, &raw);
+	perf_sample_data_init(sd, 0, 0);
+	sd->raw = &raw;
+
+	return __bpf_perf_event_output(regs, map, flags, sd);
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -407,10 +408,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
 };
 
 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
 
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
+	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
 	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
 	struct perf_raw_frag frag = {
 		.copy		= ctx_copy,
@@ -428,8 +431,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 	};
 
 	perf_fetch_caller_regs(regs);
+	perf_sample_data_init(sd, 0, 0);
+	sd->raw = &raw;
 
-	return __bpf_perf_event_output(regs, map, flags, &raw);
+	return __bpf_perf_event_output(regs, map, flags, sd);
 }
 
 BPF_CALL_0(bpf_get_current_task)
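
As the commit message notes, two tracing programs cannot race this way because they are serialized by a per-cpu bpf_prog_active counter. For context, a rough sketch of that guard, modeled on trace_call_bpf() of that era (preemption and RCU handling omitted; details may differ between trees):

static DEFINE_PER_CPU(int, bpf_prog_active);

unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	/* If a BPF program is already running on this CPU (e.g. we
	 * interrupted one from another context), bail out instead of
	 * nesting and reusing its per-cpu scratch state. */
	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		ret = 0;
		goto out;
	}

	ret = BPF_PROG_RUN(prog, ctx);
out:
	__this_cpu_dec(bpf_prog_active);
	return ret;
}

XDP and cls_bpf programs take no such counter, which is why the fix instead gives each caller class its own per-cpu perf_sample_data.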
