
Commit d793133

borkmann authored and davem330 committed
bpf, trace: fetch current cpu only once
We currently have two invocations, which is unnecessary. Fetch it only
once and use the smp_processor_id() variant, so we also get preemption
checks along with it when DEBUG_PREEMPT is set.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 1ca1cc9 commit d793133
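
For context on why the patch prefers smp_processor_id() over
raw_smp_processor_id(): the former is the checked variant. Under
CONFIG_DEBUG_PREEMPT, smp_processor_id() resolves to
debug_smp_processor_id(), which warns when called from preemptible
context, while raw_smp_processor_id() reads the per-CPU id with no such
check. A minimal sketch of the pattern the commit applies — one lookup
at entry, reused everywhere — using a hypothetical helper and sentinel
that are not part of the commit:

	#include <linux/errno.h>
	#include <linux/smp.h>

	/* Illustrative sketch only, not kernel code from this patch:
	 * fetch the CPU id once via the checked variant, then reuse it.
	 */
	static int run_on_current_cpu(unsigned int target)	/* hypothetical */
	{
		unsigned int cpu = smp_processor_id();	/* single lookup */

		if (target == (unsigned int)-1)	/* hypothetical "current
						 * cpu" sentinel */
			target = cpu;

		return target == cpu ? 0 : -EOPNOTSUPP;
	}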

File tree

1 file changed, 3 insertions(+), 2 deletions(-)

kernel/trace/bpf_trace.c

@@ -233,6 +233,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 	struct pt_regs *regs = (struct pt_regs *) (long) r1;
 	struct bpf_map *map = (struct bpf_map *) (long) r2;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
 	void *data = (void *) (long) r4;
 	struct perf_sample_data sample_data;
@@ -246,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 		return -EINVAL;
 	if (index == BPF_F_CURRENT_CPU)
-		index = raw_smp_processor_id();
+		index = cpu;
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
@@ -259,7 +260,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 		return -EINVAL;
 
-	if (unlikely(event->oncpu != smp_processor_id()))
+	if (unlikely(event->oncpu != cpu))
 		return -EOPNOTSUPP;
 
 	perf_sample_data_init(&sample_data, 0, 0);
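
For orientation, this is the kernel side of the bpf_perf_event_output()
helper; a BPF program reaches the changed branch by passing
BPF_F_CURRENT_CPU as the index. A minimal caller sketch using modern
libbpf conventions (which postdate this commit); the map name, section
name, and payload are illustrative:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
		__uint(key_size, sizeof(int));
		__uint(value_size, sizeof(int));
	} events SEC(".maps");

	SEC("kprobe/do_sys_open")
	int send_sample(void *ctx)
	{
		__u32 val = 1;	/* hypothetical payload */

		/* BPF_F_CURRENT_CPU indexes by the current CPU -- the
		 * branch the patch changes from raw_smp_processor_id()
		 * to the cached cpu value. */
		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
				      &val, sizeof(val));
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";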
