@@ -74,32 +74,52 @@ struct jit_ctx {
 
 int bpf_jit_enable __read_mostly;
 
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+                                  unsigned int size)
+{
+        void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+        if (!ptr)
+                return -EFAULT;
+        memcpy(ret, ptr, size);
+        return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
 {
         u8 ret;
         int err;
 
-        err = skb_copy_bits(skb, offset, &ret, 1);
+        if (offset < 0)
+                err = call_neg_helper(skb, offset, &ret, 1);
+        else
+                err = skb_copy_bits(skb, offset, &ret, 1);
 
         return (u64)err << 32 | ret;
 }
 
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 {
         u16 ret;
         int err;
 
-        err = skb_copy_bits(skb, offset, &ret, 2);
+        if (offset < 0)
+                err = call_neg_helper(skb, offset, &ret, 2);
+        else
+                err = skb_copy_bits(skb, offset, &ret, 2);
 
         return (u64)err << 32 | ntohs(ret);
 }
 
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 {
         u32 ret;
         int err;
 
-        err = skb_copy_bits(skb, offset, &ret, 4);
+        if (offset < 0)
+                err = call_neg_helper(skb, offset, &ret, 4);
+        else
+                err = skb_copy_bits(skb, offset, &ret, 4);
 
         return (u64)err << 32 | ntohl(ret);
 }
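Note on the return convention above: each jit_get_skb_*() helper packs status and payload into a single u64, with the error code in the upper 32 bits and the loaded value (converted to host byte order for the halfword/word variants) in the lower 32. A minimal caller-side sketch of unpacking it; the variable names are illustrative, not part of the patch:

        /* Illustrative unpacking of the packed u64 return. */
        u64 res = jit_get_skb_b(skb, off);
        int err = (int)(res >> 32);     /* 0 on success, -EFAULT when the
                                         * negative-offset helper finds no data */
        u8 val = (u8)res;               /* the loaded byte, valid when err == 0 */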
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
                 case BPF_LD | BPF_B | BPF_ABS:
                         load_order = 0;
 load:
-                        /* the interpreter will deal with the negative K */
-                        if ((int)k < 0)
-                                return -ENOTSUPP;
                         emit_mov_i(r_off, k, ctx);
 load_common:
                         ctx->seen |= SEEN_DATA | SEEN_CALL;
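This hunk removes the compile-time punt for negative immediate offsets: classic BPF uses negative K for header-relative loads (e.g. SKF_NET_OFF), and a single such instruction previously made the JIT reject the whole filter with -ENOTSUPP, forcing it into the interpreter. With call_neg_helper() in place, the load can stay JITed and be resolved at run time. A hedged example of a filter that used to trigger the fallback, using the standard classic-BPF macros from <linux/filter.h> (the offset is illustrative, not from the patch):

        /* Load the IPv4 protocol byte through a negative, network-header
         * relative offset; before this patch the ARM JIT refused to
         * compile any filter containing such a load. */
        struct sock_filter insns[] = {
                BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
                BPF_STMT(BPF_RET | BPF_A, 0),
        };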
@@ -547,12 +564,24 @@ static int build_body(struct jit_ctx *ctx)
                                 emit(ARM_SUB_I(r_scratch, r_skb_hl,
                                                1 << load_order), ctx);
                                 emit(ARM_CMP_R(r_scratch, r_off), ctx);
-                                condt = ARM_COND_HS;
+                                condt = ARM_COND_GE;
                         } else {
                                 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
                                 condt = ARM_COND_HI;
                         }
 
+                        /*
+                         * test for negative offset, only if we are
+                         * currently scheduled to take the fast
+                         * path. this will update the flags so that
+                         * the slow path instructions are ignored if the
+                         * offset is negative.
+                         *
+                         * for load_order == 0 the HI condition will
+                         * make loads at offset 0 take the slow path too.
+                         */
+                        _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+
                         _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
                               ctx);
 
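Two details here are easy to miss. First, condt predicates both the new CMP and the fast-path instructions that follow it, so the condition must stay meaningful after the re-test against zero: ARM_COND_HS (unsigned >=) would be vacuously true for any r_off compared with 0, which is why the load_order > 0 case switches to the signed ARM_COND_GE. Second, the C-level effect of the emitted sequence is roughly the following sketch, where direct_load() is a stand-in name for the inlined fast-path access, not a function in the patch:

        /* Pseudo-C for the emitted fast/slow path selection. */
        if (skb_headlen - (1 << load_order) >= off && off >= 0)
                val = direct_load(skb_data + off);      /* fast path: linear data */
        else
                val = jit_get_skb_b(skb, off);          /* slow path helper; the
                                                         * _h/_w variants serve
                                                         * larger loads */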
@@ -860,9 +889,11 @@ static int build_body(struct jit_ctx *ctx)
                         off = offsetof(struct sk_buff, vlan_tci);
                         emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                         if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-                                OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
-                        else
-                                OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+                                OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
+                        else {
+                                OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
+                                OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
+                        }
                         break;
                 case BPF_ANC | SKF_AD_QUEUE:
                         ctx->seen |= SEEN_SKB;
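For context on the VLAN fix: in kernels of this era VLAN_TAG_PRESENT is bit 12 (0x1000) of vlan_tci. SKF_AD_VLAN_TAG is expected to clear only that flag bit, keeping the priority (PCP) bits that the old VLAN_VID_MASK mask stripped, and SKF_AD_VLAN_TAG_PRESENT must yield 0 or 1 rather than the raw 0x0/0x1000 the old AND produced, matching the in-kernel interpreter. In illustrative C, the corrected emission computes:

        /* Semantics of the fixed instructions, assuming the then-current
         * VLAN_TAG_PRESENT == 0x1000 (bit 12 of vlan_tci). */
        u32 vlan_tag    = skb->vlan_tci & ~VLAN_TAG_PRESENT; /* VID + PCP   */
        u32 tag_present = (skb->vlan_tci >> 12) & 0x1;       /* strictly 0/1 */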