@@ -1059,6 +1059,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 		break;
 	case PTR_TO_STACK:
 		pointer_desc = "stack ";
+		/* The stack spill tracking logic in check_stack_write()
+		 * and check_stack_read() relies on stack accesses being
+		 * aligned.
+		 */
+		strict = true;
 		break;
 	default:
 		break;
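
Note on the hunk above: spill tracking in check_stack_write()/check_stack_read() records at most one spilled register per BPF_REG_SIZE-d (8-byte) stack slot, so a spill that straddles two slots cannot be represented; forcing strict alignment rules such accesses out up front. A standalone illustration (my own slot arithmetic, not verifier code):

#include <stdio.h>

#define BPF_REG_SIZE 8

int main(void)
{
	/* hypothetical 8-byte spill at stack offset -12: its first and
	 * last bytes fall into different 8-byte slots, so a per-slot
	 * record of "this slot holds spilled register X" cannot
	 * describe it
	 */
	int off = -12, size = 8;
	int first = (-off - 1) / BPF_REG_SIZE;			/* byte -12 -> slot 1 */
	int last = (-(off + size - 1) - 1) / BPF_REG_SIZE;	/* byte  -5 -> slot 0 */

	printf("spill spans slots %d..%d: %s\n", last, first,
	       first == last ? "trackable" : "not trackable");
	return 0;
}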
@@ -1067,6 +1072,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 					   strict);
 }
 
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+	u64 mask;
+
+	/* clear high bits in bit representation */
+	reg->var_off = tnum_cast(reg->var_off, size);
+
+	/* fix arithmetic bounds */
+	mask = ((u64)1 << (size * 8)) - 1;
+	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+		reg->umin_value &= mask;
+		reg->umax_value &= mask;
+	} else {
+		reg->umin_value = 0;
+		reg->umax_value = mask;
+	}
+	reg->smin_value = reg->umin_value;
+	reg->smax_value = reg->umax_value;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
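
The new coerce_reg_to_size() helper is the heart of this patch: unlike the old tnum_cast() + __update_reg_bounds() pairing, which only intersected the stale 64-bit bounds with the truncated var_off, it recomputes the arithmetic bounds for the narrow width. A userspace sketch of its bounds logic (stdint types standing in for the kernel's u64):

#include <stdio.h>
#include <stdint.h>

/* model of the arithmetic-bounds part of coerce_reg_to_size() */
static void coerce_bounds(uint64_t *umin, uint64_t *umax, int size)
{
	uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;

	if ((*umin & ~mask) == (*umax & ~mask)) {
		/* same high bits: low bits are still ordered, keep them */
		*umin &= mask;
		*umax &= mask;
	} else {
		/* range crosses a 2^(8*size) boundary: anything goes */
		*umin = 0;
		*umax = mask;
	}
}

int main(void)
{
	uint64_t a_min = 0x100000005ULL, a_max = 0x1000000ffULL; /* same high bits */
	uint64_t b_min = 0x0fffffff0ULL, b_max = 0x100000010ULL; /* crosses 2^32 */

	coerce_bounds(&a_min, &a_max, 4);
	coerce_bounds(&b_min, &b_max, 4);
	printf("a: [0x%llx, 0x%llx]\n", (unsigned long long)a_min,
	       (unsigned long long)a_max);	/* [0x5, 0xff] */
	printf("b: [0x%llx, 0x%llx]\n", (unsigned long long)b_min,
	       (unsigned long long)b_max);	/* [0x0, 0xffffffff] */
	return 0;
}

If umin and umax agree on every bit above the mask, truncating both keeps a tight range; otherwise the value range wraps within the narrow width and [0, mask] is the only sound answer.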
@@ -1200,9 +1228,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
 	    regs[value_regno].type == SCALAR_VALUE) {
 		/* b/h/w load zero-extends, mark upper bits as known 0 */
-		regs[value_regno].var_off =
-			tnum_cast(regs[value_regno].var_off, size);
-		__update_reg_bounds(&regs[value_regno]);
+		coerce_reg_to_size(&regs[value_regno], size);
 	}
 	return err;
 }
@@ -1282,6 +1308,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
 			regno, tn_buf);
+		return -EACCES;
 	}
 	off = regs[regno].off + regs[regno].var_off.value;
 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
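
The single added line above fixes a fall-through bug: the old code printed the "invalid variable stack read" diagnostic but did not return, so the variable-offset access continued into the checks below as if it were valid. A toy before/after model (not verifier code):

#include <stdio.h>

#define EACCES 13

static int check_old(int var_off_is_const)
{
	if (!var_off_is_const)
		printf("invalid variable stack read\n");
	/* bug: no return here, falls through to "success" */
	return 0;
}

static int check_new(int var_off_is_const)
{
	if (!var_off_is_const) {
		printf("invalid variable stack read\n");
		return -EACCES;
	}
	return 0;
}

int main(void)
{
	/* old verdict: 0 (accepted despite the error); new: -13 */
	printf("old: %d  new: %d\n", check_old(0), check_new(0));
	return 0;
}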
@@ -1772,14 +1799,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	return 0;
 }
 
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
-	/* clear high 32 bits */
-	reg->var_off = tnum_cast(reg->var_off, 4);
-	/* Update bounds */
-	__update_reg_bounds(reg);
-}
-
 static bool signed_add_overflows(s64 a, s64 b)
 {
 	/* Do the add in u64, where overflow is well-defined */
@@ -1800,6 +1819,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
 	return res > a;
 }
 
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+				  const struct bpf_reg_state *reg,
+				  enum bpf_reg_type type)
+{
+	bool known = tnum_is_const(reg->var_off);
+	s64 val = reg->var_off.value;
+	s64 smin = reg->smin_value;
+
+	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+		verbose(env, "math between %s pointer and %lld is not allowed\n",
+			reg_type_str[type], val);
+		return false;
+	}
+
+	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "%s pointer offset %d is not allowed\n",
+			reg_type_str[type], reg->off);
+		return false;
+	}
+
+	if (smin == S64_MIN) {
+		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+			reg_type_str[type]);
+		return false;
+	}
+
+	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "value %lld makes %s pointer be out of bounds\n",
+			smin, reg_type_str[type]);
+		return false;
+	}
+
+	return true;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
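
A self-contained sketch of what check_reg_sane_offset() enforces, with the verifier state pared down to the three fields it inspects. BPF_MAX_VAR_OFF is assumed here to be 1 << 29, its definition in bpf_verifier.h of this era; by capping every component of a pointer offset far below 2^63, the later min/max arithmetic cannot overflow:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define BPF_MAX_VAR_OFF (1 << 29)	/* assumed value, see bpf_verifier.h */

/* stripped-down model: only the fields the check looks at */
struct reg {
	bool known;	/* tnum_is_const(reg->var_off) */
	int64_t val;	/* reg->var_off.value */
	int32_t off;	/* reg->off */
	int64_t smin;	/* reg->smin_value */
};

static bool sane_offset(const struct reg *r)
{
	if (r->known && (r->val >= BPF_MAX_VAR_OFF || r->val <= -BPF_MAX_VAR_OFF))
		return false;		/* known constant too large */
	if (r->off >= BPF_MAX_VAR_OFF || r->off <= -BPF_MAX_VAR_OFF)
		return false;		/* fixed offset too large */
	if (r->smin == INT64_MIN)
		return false;		/* unbounded minimum */
	if (r->smin >= BPF_MAX_VAR_OFF || r->smin <= -BPF_MAX_VAR_OFF)
		return false;		/* variable part out of range */
	return true;
}

int main(void)
{
	struct reg ok  = { true, 4096, 8, 4096 };
	struct reg bad = { true, (int64_t)1 << 30, 0, 0 };

	printf("ok: %d  bad: %d\n", sane_offset(&ok), sane_offset(&bad));
	return 0;
}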
@@ -1868,6 +1922,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	dst_reg->type = ptr_reg->type;
 	dst_reg->id = ptr_reg->id;
 
+	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+		return -EINVAL;
+
 	switch (opcode) {
 	case BPF_ADD:
 		/* We can take a fixed offset as long as it doesn't overflow
@@ -1998,12 +2056,19 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
+	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+		return -EINVAL;
+
 	__update_reg_bounds(dst_reg);
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;
 }
 
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 				      struct bpf_insn *insn,
 				      struct bpf_reg_state *dst_reg,
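
The WARNING comment added above names the theme of the fixes that follow: adjust_scalar_min_max_vals() does its tracking in 64 bits, while BPF_ALU (as opposed to BPF_ALU64) instructions execute on 32-bit values. A minimal demonstration of the mismatch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t x32 = 0xffffffffu;
	uint64_t x64 = 0xffffffffull;

	/* a 32-bit ADD wraps to 0; the 64-bit model says 0x100000000 */
	printf("32-bit: 0x%x  64-bit model: 0x%llx\n",
	       x32 + 1u, (unsigned long long)(x64 + 1));
	return 0;
}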
@@ -2014,19 +2079,21 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	bool src_known, dst_known;
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
+	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
-	if (BPF_CLASS(insn->code) != BPF_ALU64) {
-		/* 32-bit ALU ops are (32,32)->64 */
-		coerce_reg_to_32(dst_reg);
-		coerce_reg_to_32(&src_reg);
-	}
 	smin_val = src_reg.smin_value;
 	smax_val = src_reg.smax_value;
 	umin_val = src_reg.umin_value;
 	umax_val = src_reg.umax_value;
 	src_known = tnum_is_const(src_reg.var_off);
 	dst_known = tnum_is_const(dst_reg->var_off);
 
+	if (!src_known &&
+	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+		__mark_reg_unknown(dst_reg);
+		return 0;
+	}
+
 	switch (opcode) {
 	case BPF_ADD:
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
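
The new early-out gives up precise tracking whenever the source is not a known constant and the op is not ADD/SUB/AND. One way naive interval propagation goes wrong for the other ops: u64 arithmetic wraps, so a computed bound can itself wrap. An illustrative case for a hypothetical [0, umax*umax] rule for BPF_MUL (not the verifier's actual MUL code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* two registers each bounded by [0, 2^33] */
	uint64_t umax = (uint64_t)1 << 33;

	/* the naive upper bound umax*umax wraps mod 2^64 down to 4 ... */
	printf("naive bound: %llu\n", (unsigned long long)(umax * umax));
	/* ... yet a perfectly in-range product already exceeds it */
	printf("3 * 3 = %llu\n", (unsigned long long)(3ULL * 3ULL));
	return 0;
}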
@@ -2155,9 +2222,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_LSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined. This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
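
umax_val >= insn_bitness is the operative change: for a 32-bit ALU shift, a count in 32..63 sailed past the old > 63 test even though shifting a 32-bit operand by that much is undefined. The two guards compared:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t umax_val = 40;		/* largest possible shift count */
	uint64_t insn_bitness = 32;	/* BPF_ALU, i.e. a 32-bit op */

	printf("old guard rejects: %d\n", umax_val > 63);		/* 0: accepted! */
	printf("new guard rejects: %d\n", umax_val >= insn_bitness);	/* 1 */
	return 0;
}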
@@ -2183,27 +2250,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_RSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined. This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
 		}
-		/* BPF_RSH is an unsigned shift, so make the appropriate casts */
-		if (dst_reg->smin_value < 0) {
-			if (umin_val) {
-				/* Sign bit will be cleared */
-				dst_reg->smin_value = 0;
-			} else {
-				/* Lost sign bit information */
-				dst_reg->smin_value = S64_MIN;
-				dst_reg->smax_value = S64_MAX;
-			}
-		} else {
-			dst_reg->smin_value =
-				(u64)(dst_reg->smin_value) >> umax_val;
-		}
+		/* BPF_RSH is an unsigned shift. If the value in dst_reg might
+		 * be negative, then either:
+		 * 1) src_reg might be zero, so the sign bit of the result is
+		 *    unknown, so we lose our signed bounds
+		 * 2) it's known negative, thus the unsigned bounds capture the
+		 *    signed bounds
+		 * 3) the signed bounds cross zero, so they tell us nothing
+		 *    about the result
+		 * If the value in dst_reg is known nonnegative, then again the
+		 * unsigned bounds capture the signed bounds.
+		 * Thus, in all cases it suffices to blow away our signed bounds
+		 * and rely on inferring new ones from the unsigned bounds and
+		 * var_off of the result.
+		 */
+		dst_reg->smin_value = S64_MIN;
+		dst_reg->smax_value = S64_MAX;
 		if (src_known)
 			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
 						       umin_val);
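
The long comment added above compresses to one observation: a logical right shift does not preserve signed ordering when the input may be negative, so the old signed-bounds updates could not be trusted; the new code simply discards them and re-derives signed bounds from the unsigned bounds and var_off afterwards. Concretely:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t v = -2;

	/* logical shift of a negative value: a shift by 0 keeps it
	 * negative, a shift by 1 makes it hugely positive, so no useful
	 * signed bound survives the operation
	 */
	printf(">> 0: %lld\n", (long long)(int64_t)((uint64_t)v >> 0)); /* -2 */
	printf(">> 1: %lld\n", (long long)(int64_t)((uint64_t)v >> 1)); /* INT64_MAX */
	return 0;
}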
@@ -2219,6 +2288,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		break;
 	}
 
+	if (BPF_CLASS(insn->code) != BPF_ALU64) {
+		/* 32-bit ALU ops are (32,32)->32 */
+		coerce_reg_to_size(dst_reg, 4);
+		coerce_reg_to_size(&src_reg, 4);
+	}
+
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;
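
Coercion now happens after the 64-bit computation rather than before it, and via coerce_reg_to_size() it truncates the bounds as well as var_off. For a 32-bit op, the 64-bit model result must be masked back to what the machine really produces:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dst = 0xfffffff0ull, src = 0x20;
	uint64_t res64 = dst + src;		/* 64-bit model: 0x100000010 */
	uint64_t mask = ((uint64_t)1 << 32) - 1;

	printf("model: 0x%llx  coerced to 4 bytes: 0x%llx\n",
	       (unsigned long long)res64,
	       (unsigned long long)(res64 & mask));	/* 0x10, the real 32-bit result */
	return 0;
}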
@@ -2396,17 +2471,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 				return -EACCES;
 			}
 			mark_reg_unknown(env, regs, insn->dst_reg);
-			/* high 32 bits are known zero. */
-			regs[insn->dst_reg].var_off = tnum_cast(
-				regs[insn->dst_reg].var_off, 4);
-			__update_reg_bounds(&regs[insn->dst_reg]);
+			coerce_reg_to_size(&regs[insn->dst_reg], 4);
 		}
 	} else {
 		/* case: R = imm
 		 * remember the value we stored into this reg
 		 */
 		regs[insn->dst_reg].type = SCALAR_VALUE;
-		__mark_reg_known(regs + insn->dst_reg, insn->imm);
+		if (BPF_CLASS(insn->code) == BPF_ALU64) {
+			__mark_reg_known(regs + insn->dst_reg,
+					 insn->imm);
+		} else {
+			__mark_reg_known(regs + insn->dst_reg,
+					 (u32)insn->imm);
+		}
 	}
 
 } else if (opcode > BPF_END) {
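
The split BPF_MOV handling encodes zero-extension: a 32-bit MOV of a negative immediate leaves the zero-extended u32 bit pattern in the register, not the sign-extended s64 value that the old unconditional __mark_reg_known(..., insn->imm) recorded:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t imm = -1;

	/* 64-bit MOV: the register really holds -1 (all bits set) */
	printf("alu64: %lld\n", (long long)(int64_t)imm);
	/* 32-bit MOV: the upper half is zeroed, value is 4294967295 */
	printf("alu32: %llu\n", (unsigned long long)(uint32_t)imm);
	return 0;
}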
@@ -3437,15 +3515,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 			return range_within(rold, rcur) &&
 			       tnum_in(rold->var_off, rcur->var_off);
 		} else {
-			/* if we knew anything about the old value, we're not
-			 * equal, because we can't know anything about the
-			 * scalar value of the pointer in the new value.
+			/* We're trying to use a pointer in place of a scalar.
+			 * Even if the scalar was unbounded, this could lead to
+			 * pointer leaks because scalars are allowed to leak
+			 * while pointers are not. We could make this safe in
+			 * special cases if root is calling us, but it's
+			 * probably not worth the hassle.
 			 */
-			return rold->umin_value == 0 &&
-			       rold->umax_value == U64_MAX &&
-			       rold->smin_value == S64_MIN &&
-			       rold->smax_value == S64_MAX &&
-			       tnum_is_unknown(rold->var_off);
+			return false;
 		}
 	case PTR_TO_MAP_VALUE:
 		/* If the new min/max/var_off satisfy the old ones and