
Commit 290af86

Alexei Starovoitov authored and Daniel Borkmann (borkmann) committed
bpf: introduce BPF_JIT_ALWAYS_ON config
The BPF interpreter has been used as part of the Spectre 2 attack CVE-2017-5715.

A quote from the Google Project Zero blog:
"At this point, it would normally be necessary to locate gadgets in
the host kernel code that can be used to actually leak data by reading from
an attacker-controlled location, shifting and masking the result appropriately
and then using the result of that as offset to an attacker-controlled address
for a load. But piecing gadgets together and figuring out which ones work in a
speculation context seems annoying. So instead, we decided to use the eBPF
interpreter, which is built into the host kernel - while there is no legitimate
way to invoke it from inside a VM, the presence of the code in the host
kernel's text section is sufficient to make it usable for the attack, just
like with ordinary ROP gadgets."

To make the attacker's job harder, introduce the BPF_JIT_ALWAYS_ON config
option that removes the interpreter from the kernel in favor of JIT-only mode.
So far eBPF JIT is supported by:
x64, arm64, arm32, sparc64, s390, powerpc64, mips64

The start of the JITed program is randomized and the code page is marked as
read-only. In addition, "constant blinding" can be turned on with
net.core.bpf_jit_harden.

v2->v3:
- move __bpf_prog_ret0 under ifdef (Daniel)

v1->v2:
- fix init order, test_bpf and cBPF (Daniel's feedback)
- fix offloaded bpf (Jakub's feedback)
- add 'return 0' dummy in case something can invoke prog->bpf_func
- retarget bpf tree. For bpf-next the patch would need one extra hunk.
  It will be sent when the trees are merged back to net-next

Considered doing:
  int bpf_jit_enable __read_mostly = BPF_EBPF_JIT_DEFAULT;
but it seems better to land the patch as-is and in bpf-next remove
bpf_jit_enable global variable from all JITs, consolidate in one place
and remove this jit_init() function.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
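
For context on the user-visible behavior: on a kernel built with
BPF_JIT_ALWAYS_ON, program load fails outright when the JIT cannot compile,
instead of silently falling back to the interpreter. A minimal sketch of a
bpf(2) caller observing this, assuming a trivial "return 0" socket filter;
this standalone program is illustrative, not part of the commit:

#include <errno.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Trivial eBPF program: r0 = 0; exit. */
	struct bpf_insn prog[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insn_cnt = sizeof(prog) / sizeof(prog[0]);
	attr.insns = (__u64)(unsigned long)prog;
	attr.license = (__u64)(unsigned long)"GPL";

	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd < 0)
		/* With BPF_JIT_ALWAYS_ON and no working JIT, the load
		 * fails with -ENOTSUPP (surfacing as errno 524).
		 */
		perror("BPF_PROG_LOAD");
	else
		printf("loaded as fd %d (JIT compiled)\n", fd);
	return 0;
}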
1 parent be95a84 commit 290af86

File tree

6 files changed: +50 -8 lines


init/Kconfig

Lines changed: 7 additions & 0 deletions

@@ -1392,6 +1392,13 @@ config BPF_SYSCALL
 	  Enable the bpf() system call that allows to manipulate eBPF
 	  programs and maps via file descriptors.
 
+config BPF_JIT_ALWAYS_ON
+	bool "Permanently enable BPF JIT and remove BPF interpreter"
+	depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
+	help
+	  Enables BPF JIT and removes BPF interpreter to avoid
+	  speculative execution of BPF instructions by the interpreter
+
 config USERFAULTFD
 	bool "Enable userfaultfd() system call"
 	select ANON_INODES

kernel/bpf/core.c

Lines changed: 19 additions & 0 deletions

@@ -767,6 +767,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 EXPORT_SYMBOL_GPL(__bpf_call_base);
 
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
 /**
  *	__bpf_prog_run - run eBPF program on a given context
  *	@ctx: is the data we are operating on
@@ -1317,6 +1318,14 @@ EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
 };
 
+#else
+static unsigned int __bpf_prog_ret0(const void *ctx,
+				    const struct bpf_insn *insn)
+{
+	return 0;
+}
+#endif
+
 bool bpf_prog_array_compatible(struct bpf_array *array,
 			       const struct bpf_prog *fp)
 {
@@ -1364,9 +1373,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
 
 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+	fp->bpf_func = __bpf_prog_ret0;
+#endif
 
 	/* eBPF JITs can rewrite the program in case constant
 	 * blinding is active. However, in case of error during
@@ -1376,6 +1389,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 	 */
 	if (!bpf_prog_is_dev_bound(fp->aux)) {
 		fp = bpf_int_jit_compile(fp);
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+		if (!fp->jited) {
+			*err = -ENOTSUPP;
+			return fp;
+		}
+#endif
 	} else {
 		*err = bpf_prog_offload_compile(fp);
 		if (*err)
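
As an aside on the interpreter dispatch kept under the #ifndef branch above:
stack_depth is rounded up to a multiple of 32 and selects one of 16 interpreter
variants specialized for 32 through 512 bytes of BPF stack. A standalone sketch
of that index arithmetic (ROUND_UP stands in for the kernel's round_up() here):

#include <stdio.h>

/* Stand-in for the kernel's round_up() macro. */
#define ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int depths[] = { 1, 33, 64, 512 };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int d = depths[i];

		/* Mirrors: interpreters[(round_up(stack_depth, 32) / 32) - 1] */
		printf("stack_depth=%3u -> interpreters[%2u]\n",
		       d, ROUND_UP(d, 32) / 32 - 1);
	}
	return 0; /* prints indexes 0, 1, 1 and 15 respectively */
}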

lib/test_bpf.c

Lines changed: 7 additions & 4 deletions

@@ -6250,9 +6250,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
 			return NULL;
 		}
 	}
-	/* We don't expect to fail. */
 	if (*err) {
-		pr_cont("FAIL to attach err=%d len=%d\n",
+		pr_cont("FAIL to prog_create err=%d len=%d\n",
 			*err, fprog.len);
 		return NULL;
 	}
@@ -6276,6 +6275,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
 		 * checks.
 		 */
 		fp = bpf_prog_select_runtime(fp, err);
+		if (*err) {
+			pr_cont("FAIL to select_runtime err=%d\n", *err);
+			return NULL;
+		}
 		break;
 	}
 
@@ -6461,8 +6464,8 @@ static __init int test_bpf(void)
 				pass_cnt++;
 				continue;
 			}
-
-			return err;
+			err_cnt++;
+			continue;
 		}
 
 		pr_cont("jited:%u ", fp->jited);

net/core/filter.c

Lines changed: 2 additions & 4 deletions

@@ -1054,11 +1054,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
 		 */
 		goto out_err_free;
 
-	/* We are guaranteed to never error here with cBPF to eBPF
-	 * transitions, since there's no issue with type compatibility
-	 * checks on program arrays.
-	 */
 	fp = bpf_prog_select_runtime(fp, &err);
+	if (err)
+		goto out_err_free;
 
 	kfree(old_prog);
 	return fp;

net/core/sysctl_net_core.c

Lines changed: 6 additions & 0 deletions

@@ -325,7 +325,13 @@ static struct ctl_table net_core_table[] = {
 		.data		= &bpf_jit_enable,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
 		.proc_handler	= proc_dointvec
+#else
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &one,
+#endif
 	},
 # ifdef CONFIG_HAVE_EBPF_JIT
 	{
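
Since extra1 and extra2 both point at one, proc_dointvec_minmax restricts the
writable range of net.core.bpf_jit_enable to exactly [1, 1]: the JIT can no
longer be disabled at runtime, and out-of-range writes fail with -EINVAL. An
illustrative userspace probe (an assumption for demonstration, not part of the
commit):

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/proc/sys/net/core/bpf_jit_enable";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* On a BPF_JIT_ALWAYS_ON kernel the write of 0 is rejected with
	 * EINVAL, since the allowed range is pinned to [1, 1].
	 */
	if (fputs("0", f) < 0 || fflush(f) != 0)
		fprintf(stderr, "write rejected: %s\n", strerror(errno));
	else
		fprintf(stderr, "write accepted (interpreter kernel)\n");
	fclose(f);
	return 0;
}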

net/socket.c

Lines changed: 9 additions & 0 deletions

@@ -2619,6 +2619,15 @@ static int __init sock_init(void)
 
 core_initcall(sock_init);	/* early initcall */
 
+static int __init jit_init(void)
+{
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+	bpf_jit_enable = 1;
+#endif
+	return 0;
+}
+pure_initcall(jit_init);
+
 #ifdef CONFIG_PROC_FS
 void socket_seq_show(struct seq_file *seq)
 {
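
On the choice of pure_initcall (the "fix init order" item from v1->v2):
initcall levels run in ascending order, and pure_initcall sits at level 0,
ahead of core_initcall at level 1, so jit_init() pins bpf_jit_enable to 1
before sock_init() and the rest of the stack come up. For reference, the
definitions as they appear in include/linux/init.h of this era:

#define pure_initcall(fn)		__define_initcall(fn, 0)
#define core_initcall(fn)		__define_initcall(fn, 1)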
