
Commit df1a2cb

fomichev authored and borkmann committed
bpf/test_run: fix unkillable BPF_PROG_TEST_RUN
Syzbot found out that running BPF_PROG_TEST_RUN with repeat=0xffffffff
makes the process unkillable. The problem is that when CONFIG_PREEMPT is
enabled, we never see need_resched() return true. This is due to the fact
that preempt_enable() (which we do in bpf_test_run_one on each iteration)
now handles resched if it's needed.

Let's disable preemption for the whole run, not per test. In this case we
can properly see whether resched is needed.

Let's also properly return -EINTR to userspace in case of a signal
interrupt.

See recent discussion:
http://lore.kernel.org/netdev/CAH3MdRWHr4N8jei8jxDppXjmw-Nw=puNDLbu1dQOFQHxfU2onA@mail.gmail.com

I'll follow up with the same fix for bpf_prog_test_run_flow_dissector in
bpf-next.

Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
1 parent 21d2cb4 commit df1a2cb

File tree

1 file changed (+24, -21 lines)


net/bpf/test_run.c

Lines changed: 24 additions & 21 deletions
@@ -13,27 +13,13 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 
-static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
-                struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
-        u32 ret;
-
-        preempt_disable();
-        rcu_read_lock();
-        bpf_cgroup_storage_set(storage);
-        ret = BPF_PROG_RUN(prog, ctx);
-        rcu_read_unlock();
-        preempt_enable();
-
-        return ret;
-}
-
-static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
-                        u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+                        u32 *retval, u32 *time)
 {
         struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
         enum bpf_cgroup_storage_type stype;
         u64 time_start, time_spent = 0;
+        int ret = 0;
         u32 i;
 
         for_each_cgroup_storage_type(stype) {
@@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
 
         if (!repeat)
                 repeat = 1;
+
+        rcu_read_lock();
+        preempt_disable();
         time_start = ktime_get_ns();
         for (i = 0; i < repeat; i++) {
-                *ret = bpf_test_run_one(prog, ctx, storage);
+                bpf_cgroup_storage_set(storage);
+                *retval = BPF_PROG_RUN(prog, ctx);
+
+                if (signal_pending(current)) {
+                        ret = -EINTR;
+                        break;
+                }
+
                 if (need_resched()) {
-                        if (signal_pending(current))
-                                break;
                         time_spent += ktime_get_ns() - time_start;
+                        preempt_enable();
+                        rcu_read_unlock();
+
                         cond_resched();
+
+                        rcu_read_lock();
+                        preempt_disable();
                         time_start = ktime_get_ns();
                 }
         }
         time_spent += ktime_get_ns() - time_start;
+        preempt_enable();
+        rcu_read_unlock();
+
         do_div(time_spent, repeat);
         *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
 
         for_each_cgroup_storage_type(stype)
                 bpf_cgroup_storage_free(storage[stype]);
 
-        return 0;
+        return ret;
 }
 
 static int bpf_test_finish(const union bpf_attr *kattr,
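
For context, here is a minimal sketch of how userspace drives BPF_PROG_TEST_RUN
via the raw bpf(2) syscall, showing where the repeat value from the syzbot
reproducer enters and where the new -EINTR surfaces. The prog_test_run() helper
name is illustrative (not part of this commit), and prog_fd is assumed to come
from a prior BPF_PROG_LOAD; field names follow union bpf_attr in <linux/bpf.h>.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative wrapper around the bpf(2) BPF_PROG_TEST_RUN command. */
static int prog_test_run(int prog_fd, void *data, __u32 data_size,
                         __u32 repeat, __u32 *retval)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd = prog_fd;
        attr.test.data_in = (__u64)(unsigned long)data;
        attr.test.data_size_in = data_size;
        /* The syzbot reproducer passed repeat=0xffffffff here; before
         * this fix, the in-kernel loop could not be interrupted.
         */
        attr.test.repeat = repeat;

        if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
                return -1;      /* errno is EINTR if a signal broke the run */

        if (retval)
                *retval = attr.test.retval;
        return 0;
}

With the fix applied, sending a signal to a process stuck in a huge repeat
loop now fails the syscall with errno set to EINTR instead of leaving the
task unkillable.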
