
Commit 7c8cbac

Merge branch 'arm-bpf-fixes'
Nicolas Schichan says:

====================
BPF JIT fixes for ARM

These patches fix bugs in the ARM BPF JIT and should probably find
their way into a stable kernel. All 60 test_bpf tests in the Linux 4.1
release now pass (previously 54 out of 60 passed).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents: 89e478a + c18fe54
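
For context, here is a minimal userspace sketch (my illustration, not code from this series) of the kind of classic BPF program these fixes target: a socket filter that loads a byte at a negative offset (SKF_NET_OFF + 9, the IPv4 protocol field). Before this series the ARM JIT rejected such loads with -ENOTSUPP; with the negative-offset helper it can now emit a slow-path call instead. The socket setup and the exact filter are illustrative assumptions.

/* Illustrative sketch: a classic BPF socket filter using a negative
 * load offset (SKF_NET_OFF), the case the ARM JIT previously rejected.
 * Needs CAP_NET_RAW for the packet socket.
 */
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static struct sock_filter insns[] = {
	/* A = byte at network-header offset 9 (IPv4 protocol field).
	 * k is negative here, so the kernel resolves it via its
	 * negative-offset helper rather than skb_copy_bits().
	 */
	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
	/* Accept only TCP packets, drop everything else. */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_TCP, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
};

int main(void)
{
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0) {
		perror("SO_ATTACH_FILTER");
		return 1;
	}
	/* ... recv() loop would go here ... */
	return 0;
}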

1 file changed (+44, −13)

arch/arm/net/bpf_jit_32.c

Lines changed: 44 additions & 13 deletions
@@ -74,32 +74,52 @@ struct jit_ctx {
 
 int bpf_jit_enable __read_mostly;
 
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+		      unsigned int size)
+{
+	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+	if (!ptr)
+		return -EFAULT;
+	memcpy(ret, ptr, size);
+	return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
 {
 	u8 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 1);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 1);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 1);
 
 	return (u64)err << 32 | ret;
 }
 
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 {
 	u16 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 2);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 2);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 2);
 
 	return (u64)err << 32 | ntohs(ret);
 }
 
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 {
 	u32 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 4);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 4);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 4);
 
 	return (u64)err << 32 | ntohl(ret);
 }
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
 		case BPF_LD | BPF_B | BPF_ABS:
 			load_order = 0;
 load:
-			/* the interpreter will deal with the negative K */
-			if ((int)k < 0)
-				return -ENOTSUPP;
 			emit_mov_i(r_off, k, ctx);
 load_common:
 			ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ static int build_body(struct jit_ctx *ctx)
 				emit(ARM_SUB_I(r_scratch, r_skb_hl,
 					       1 << load_order), ctx);
 				emit(ARM_CMP_R(r_scratch, r_off), ctx);
-				condt = ARM_COND_HS;
+				condt = ARM_COND_GE;
 			} else {
 				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
 				condt = ARM_COND_HI;
 			}
 
+			/*
+			 * test for negative offset, only if we are
+			 * currently scheduled to take the fast
+			 * path. this will update the flags so that
+			 * the slowpath instruction are ignored if the
+			 * offset is negative.
+			 *
+			 * for loard_order == 0 the HI condition will
+			 * make loads at offset 0 take the slow path too.
+			 */
+			_emit(condt, ARM_CMP_I(r_off, 0), ctx);
+
 			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
 			      ctx);
 
@@ -860,9 +889,11 @@ static int build_body(struct jit_ctx *ctx)
 			off = offsetof(struct sk_buff, vlan_tci);
 			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
-			else
-				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
+			else {
+				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
+				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
+			}
 			break;
 		case BPF_ANC | SKF_AD_QUEUE:
 			ctx->seen |= SEEN_SKB;
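
For reference, a small C sketch (my illustration, not part of the patch) of the values the two VLAN ancillary loads are expected to produce, matching the corrected instruction sequences above: SKF_AD_VLAN_TAG returns the VLAN TCI with the "tag present" bit (bit 12, VLAN_TAG_PRESENT = 0x1000) cleared, and SKF_AD_VLAN_TAG_PRESENT returns that bit as 0 or 1. The old sequences instead masked the tag with VLAN_VID_MASK (dropping the priority bits) and returned the raw flag value rather than 0/1.

/* Illustrative only: the expected results of the two ancillary loads,
 * written as plain C over the 16-bit vlan_tci field.
 */
#include <stdint.h>

#define VLAN_TAG_PRESENT	0x1000	/* bit 12 of vlan_tci */

/* SKF_AD_VLAN_TAG: the tag with the "present" bit masked off. */
static inline uint16_t bpf_vlan_tag(uint16_t vlan_tci)
{
	return vlan_tci & ~VLAN_TAG_PRESENT;	/* AND ~VLAN_TAG_PRESENT */
}

/* SKF_AD_VLAN_TAG_PRESENT: 1 if a tag is present, 0 otherwise. */
static inline uint16_t bpf_vlan_tag_present(uint16_t vlan_tci)
{
	return (vlan_tci >> 12) & 0x1;		/* LSR #12, then AND 1 */
}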
