@@ -530,12 +530,14 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
 			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
 			break;
 
-		/* RET_K, RET_A are remaped into 2 insns. */
+		/* RET_K is remaped into 2 insns. RET_A case doesn't need an
+		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
+		 */
 		case BPF_RET | BPF_A:
 		case BPF_RET | BPF_K:
-			*insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
-						BPF_K : BPF_X, BPF_REG_0,
-						BPF_REG_A, fp->k);
+			if (BPF_RVAL(fp->code) == BPF_K)
+				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
+							0, fp->k);
 			*insn = BPF_EXIT_INSN();
 			break;
 
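For illustration, this is roughly what the converter now emits for the two classic return forms, sketched with the insn macros from include/linux/filter.h (the array names are made up for this note; BPF_MOV32_RAW(BPF_K, BPF_REG_0, 0, k) is effectively BPF_MOV32_IMM(BPF_REG_0, k), and BPF_REG_A is an alias for BPF_REG_0, which is why RET_A needs no extra mov):

/* Sketch only, not part of the patch. */

/* classic: BPF_STMT(BPF_RET | BPF_K, 0xffff)  ->  two eBPF insns */
struct bpf_insn ret_k_conv[] = {
	BPF_MOV32_IMM(BPF_REG_0, 0xffff),
	BPF_EXIT_INSN(),
};

/* classic: BPF_STMT(BPF_RET | BPF_A, 0)  ->  exit only, A already lives in R0 */
struct bpf_insn ret_a_conv[] = {
	BPF_EXIT_INSN(),
};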
@@ -1333,15 +1335,22 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
 	return 0;
 }
 
-#define BPF_LDST_LEN 16U
+struct bpf_scratchpad {
+	union {
+		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
+		u8     buff[MAX_BPF_STACK];
+	};
+};
+
+static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
 
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
+	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	int offset = (int) r2;
 	void *from = (void *) (long) r3;
 	unsigned int len = (unsigned int) r4;
-	char buf[BPF_LDST_LEN];
 	void *ptr;
 
 	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM)))
@@ -1355,14 +1364,12 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 	 *
 	 * so check for invalid 'offset' and too large 'len'
 	 */
-	if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
+	if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
 		return -EFAULT;
-
-	if (unlikely(skb_cloned(skb) &&
-		     !skb_clone_writable(skb, offset + len)))
+	if (unlikely(skb_try_make_writable(skb, offset + len)))
 		return -EFAULT;
 
-	ptr = skb_header_pointer(skb, offset, len, buf);
+	ptr = skb_header_pointer(skb, offset, len, sp->buff);
 	if (unlikely(!ptr))
 		return -EFAULT;
 
@@ -1371,7 +1378,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 
 	memcpy(ptr, from, len);
 
-	if (ptr == buf)
+	if (ptr == sp->buff)
 		/* skb_store_bits cannot return -EFAULT here */
 		skb_store_bits(skb, offset, ptr, len);
 
@@ -1400,7 +1407,7 @@ static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	unsigned int len = (unsigned int) r4;
 	void *ptr;
 
-	if (unlikely((u32) offset > 0xffff || len > BPF_LDST_LEN))
+	if (unlikely((u32) offset > 0xffff || len > MAX_BPF_STACK))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, len, to);
@@ -1432,9 +1439,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 		return -EINVAL;
 	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
-
-	if (unlikely(skb_cloned(skb) &&
-		     !skb_clone_writable(skb, offset + sizeof(sum))))
+	if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1474,23 +1479,31 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
+	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
 	int offset = (int) r2;
 	__sum16 sum, *ptr;
 
-	if (unlikely(flags & ~(BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
+	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
+			       BPF_F_HDR_FIELD_MASK)))
 		return -EINVAL;
 	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
-
-	if (unlikely(skb_cloned(skb) &&
-		     !skb_clone_writable(skb, offset + sizeof(sum))))
+	if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
 	if (unlikely(!ptr))
 		return -EFAULT;
+	if (is_mmzero && !*ptr)
+		return 0;
 
 	switch (flags & BPF_F_HDR_FIELD_MASK) {
+	case 0:
+		if (unlikely(from != 0))
+			return -EINVAL;
+
+		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
+		break;
 	case 2:
 		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
 		break;
@@ -1501,6 +1514,8 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 		return -EINVAL;
 	}
 
+	if (is_mmzero && !*ptr)
+		*ptr = CSUM_MANGLED_0;
 	if (ptr == &sum)
 		/* skb_store_bits guaranteed to not return -EFAULT here */
 		skb_store_bits(skb, offset, ptr, sizeof(sum));
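To show how the new flag is meant to be consumed, here is a hedged program-side sketch in the samples/bpf style (the helper declaration, the function name and the csum_off parameter are illustrative, not part of the patch). With BPF_F_MARK_MANGLED_0, a stored checksum of 0 (i.e. "UDP checksum disabled") is left untouched, and a recomputed checksum that folds to 0 is written back as CSUM_MANGLED_0:

#include <linux/bpf.h>

/* samples/bpf-style helper declaration; kernel-side signature is
 * bpf_l4_csum_replace(skb, offset, from, to, flags).
 */
static int (*bpf_l4_csum_replace)(void *ctx, int off, __u64 from, __u64 to,
				  __u64 flags) = (void *) BPF_FUNC_l4_csum_replace;

/* Hypothetical fixup after rewriting the IPv4 destination address: patch the
 * UDP checksum, which covers the pseudo header.  The low flag bits carry the
 * width of the changed field (4 bytes here).
 */
static int fixup_udp_daddr_csum(struct __sk_buff *skb, int csum_off,
				__u32 old_daddr, __u32 new_daddr)
{
	return bpf_l4_csum_replace(skb, csum_off, old_daddr, new_daddr,
				   BPF_F_PSEUDO_HDR | BPF_F_MARK_MANGLED_0 |
				   sizeof(new_daddr));
}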
@@ -1519,6 +1534,45 @@ const struct bpf_func_proto bpf_l4_csum_replace_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
+static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
+{
+	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
+	u64 diff_size = from_size + to_size;
+	__be32 *from = (__be32 *) (long) r1;
+	__be32 *to   = (__be32 *) (long) r3;
+	int i, j = 0;
+
+	/* This is quite flexible, some examples:
+	 *
+	 *   from_size == 0, to_size > 0,  seed := csum --> pushing data
+	 *   from_size > 0,  to_size == 0, seed := csum --> pulling data
+	 *   from_size > 0,  to_size > 0,  seed := 0    --> diffing data
+	 *
+	 * Even for diffing, from_size and to_size don't need to be equal.
+	 */
+	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
+		     diff_size > sizeof(sp->diff)))
+		return -EINVAL;
+
+	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
+		sp->diff[j] = ~from[i];
+	for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
+		sp->diff[j] = to[i];
+
+	return csum_partial(sp->diff, diff_size, seed);
+}
+
+const struct bpf_func_proto bpf_csum_diff_proto = {
+	.func		= bpf_csum_diff,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_STACK,
+	.arg2_type	= ARG_CONST_STACK_SIZE_OR_ZERO,
+	.arg3_type	= ARG_PTR_TO_STACK,
+	.arg4_type	= ARG_CONST_STACK_SIZE_OR_ZERO,
+	.arg5_type	= ARG_ANYTHING,
+};
+
 static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
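As a usage sketch for the new helper (again samples/bpf-style declarations; names and the csum_off parameter are illustrative): the delta computed by bpf_csum_diff() can be handed to bpf_l4_csum_replace() with from == 0, which selects the new "case 0" / inet_proto_csum_replace_by_diff() path above:

#include <linux/bpf.h>

static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size,
			    int seed) = (void *) BPF_FUNC_csum_diff;
static int (*bpf_l4_csum_replace)(void *ctx, int off, __u64 from, __u64 to,
				  __u64 flags) = (void *) BPF_FUNC_l4_csum_replace;

static int rewrite_daddr_csum(struct __sk_buff *skb, int csum_off,
			      __u32 old_daddr, __u32 new_daddr)
{
	/* Both buffers are 4-byte multiples, as the helper requires. */
	__u32 from = old_daddr, to = new_daddr;
	int csum = bpf_csum_diff(&from, sizeof(from), &to, sizeof(to), 0);

	/* from == 0 and no field width in the flags -> replace by diff. */
	return bpf_l4_csum_replace(skb, csum_off, 0, csum, BPF_F_PSEUDO_HDR);
}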
@@ -1682,6 +1736,13 @@ bool bpf_helper_changes_skb_data(void *func)
 		return true;
 	if (func == bpf_skb_vlan_pop)
 		return true;
+	if (func == bpf_skb_store_bytes)
+		return true;
+	if (func == bpf_l3_csum_replace)
+		return true;
+	if (func == bpf_l4_csum_replace)
+		return true;
+
 	return false;
 }
 
@@ -1849,6 +1910,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return &bpf_skb_store_bytes_proto;
 	case BPF_FUNC_skb_load_bytes:
 		return &bpf_skb_load_bytes_proto;
+	case BPF_FUNC_csum_diff:
+		return &bpf_csum_diff_proto;
 	case BPF_FUNC_l3_csum_replace:
 		return &bpf_l3_csum_replace_proto;
 	case BPF_FUNC_l4_csum_replace: