Skip to content

Commit 44fa2db

Browse files
netoptimizer authored and davem330 committed
xdp: transition into using xdp_frame for ndo_xdp_xmit
Changing API ndo_xdp_xmit to take a struct xdp_frame instead of struct xdp_buff. This brings xdp_return_frame and ndp_xdp_xmit in sync. This builds towards changing the API further to become a bulk API, because xdp_buff is not a queue-able object while xdp_frame is. V4: Adjust for commit 59655a5 ("tuntap: XDP_TX can use native XDP") V7: Adjust for commit d9314c4 ("i40e: add support for XDP_REDIRECT") Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 0399309 commit 44fa2db

File tree

7 files changed

+74
-46
lines changed

7 files changed

+74
-46
lines changed

drivers/net/ethernet/intel/i40e/i40e_txrx.c

Lines changed: 17 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -2203,9 +2203,20 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
22032203
#define I40E_XDP_CONSUMED 1
22042204
#define I40E_XDP_TX 2
22052205

2206-
static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
2206+
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
22072207
struct i40e_ring *xdp_ring);
22082208

2209+
static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp,
2210+
struct i40e_ring *xdp_ring)
2211+
{
2212+
struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
2213+
2214+
if (unlikely(!xdpf))
2215+
return I40E_XDP_CONSUMED;
2216+
2217+
return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2218+
}
2219+
22092220
/**
22102221
* i40e_run_xdp - run an XDP program
22112222
* @rx_ring: Rx ring being processed
@@ -2233,7 +2244,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
22332244
break;
22342245
case XDP_TX:
22352246
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2236-
result = i40e_xmit_xdp_ring(xdp, xdp_ring);
2247+
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
22372248
break;
22382249
case XDP_REDIRECT:
22392250
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
@@ -3480,21 +3491,14 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
34803491
* @xdp: data to transmit
34813492
* @xdp_ring: XDP Tx ring
34823493
**/
3483-
static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
3494+
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
34843495
struct i40e_ring *xdp_ring)
34853496
{
34863497
u16 i = xdp_ring->next_to_use;
34873498
struct i40e_tx_buffer *tx_bi;
34883499
struct i40e_tx_desc *tx_desc;
3489-
struct xdp_frame *xdpf;
3500+
u32 size = xdpf->len;
34903501
dma_addr_t dma;
3491-
u32 size;
3492-
3493-
xdpf = convert_to_xdp_frame(xdp);
3494-
if (unlikely(!xdpf))
3495-
return I40E_XDP_CONSUMED;
3496-
3497-
size = xdpf->len;
34983502

34993503
if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
35003504
xdp_ring->tx_stats.tx_busy++;
@@ -3684,7 +3688,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
36843688
*
36853689
* Returns Zero if sent, else an error code
36863690
**/
3687-
int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
3691+
int i40e_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf)
36883692
{
36893693
struct i40e_netdev_priv *np = netdev_priv(dev);
36903694
unsigned int queue_index = smp_processor_id();
@@ -3697,7 +3701,7 @@ int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
36973701
if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
36983702
return -ENXIO;
36993703

3700-
err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
3704+
err = i40e_xmit_xdp_ring(xdpf, vsi->xdp_rings[queue_index]);
37013705
if (err != I40E_XDP_TX)
37023706
return -ENOSPC;
37033707

drivers/net/ethernet/intel/i40e/i40e_txrx.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -511,7 +511,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
511511
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
512512
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
513513
bool __i40e_chk_linearize(struct sk_buff *skb);
514-
int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp);
514+
int i40e_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf);
515515
void i40e_xdp_flush(struct net_device *dev);
516516

517517
/**

drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

Lines changed: 13 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -2262,14 +2262,15 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
22622262
#define IXGBE_XDP_TX 2
22632263

22642264
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
2265-
struct xdp_buff *xdp);
2265+
struct xdp_frame *xdpf);
22662266

22672267
static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
22682268
struct ixgbe_ring *rx_ring,
22692269
struct xdp_buff *xdp)
22702270
{
22712271
int err, result = IXGBE_XDP_PASS;
22722272
struct bpf_prog *xdp_prog;
2273+
struct xdp_frame *xdpf;
22732274
u32 act;
22742275

22752276
rcu_read_lock();
@@ -2278,12 +2279,19 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
22782279
if (!xdp_prog)
22792280
goto xdp_out;
22802281

2282+
prefetchw(xdp->data_hard_start); /* xdp_frame write */
2283+
22812284
act = bpf_prog_run_xdp(xdp_prog, xdp);
22822285
switch (act) {
22832286
case XDP_PASS:
22842287
break;
22852288
case XDP_TX:
2286-
result = ixgbe_xmit_xdp_ring(adapter, xdp);
2289+
xdpf = convert_to_xdp_frame(xdp);
2290+
if (unlikely(!xdpf)) {
2291+
result = IXGBE_XDP_CONSUMED;
2292+
break;
2293+
}
2294+
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
22872295
break;
22882296
case XDP_REDIRECT:
22892297
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
@@ -2386,7 +2394,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
23862394
xdp.data_hard_start = xdp.data -
23872395
ixgbe_rx_offset(rx_ring);
23882396
xdp.data_end = xdp.data + size;
2389-
prefetchw(xdp.data_hard_start); /* xdp_frame write */
23902397

23912398
skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
23922399
}
@@ -8344,20 +8351,15 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
83448351
}
83458352

83468353
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8347-
struct xdp_buff *xdp)
8354+
struct xdp_frame *xdpf)
83488355
{
83498356
struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
83508357
struct ixgbe_tx_buffer *tx_buffer;
83518358
union ixgbe_adv_tx_desc *tx_desc;
8352-
struct xdp_frame *xdpf;
83538359
u32 len, cmd_type;
83548360
dma_addr_t dma;
83558361
u16 i;
83568362

8357-
xdpf = convert_to_xdp_frame(xdp);
8358-
if (unlikely(!xdpf))
8359-
return -EOVERFLOW;
8360-
83618363
len = xdpf->len;
83628364

83638365
if (unlikely(!ixgbe_desc_unused(ring)))
@@ -10010,7 +10012,7 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1001010012
}
1001110013
}
1001210014

10013-
static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
10015+
static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf)
1001410016
{
1001510017
struct ixgbe_adapter *adapter = netdev_priv(dev);
1001610018
struct ixgbe_ring *ring;
@@ -10026,7 +10028,7 @@ static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
1002610028
if (unlikely(!ring))
1002710029
return -ENXIO;
1002810030

10029-
err = ixgbe_xmit_xdp_ring(adapter, xdp);
10031+
err = ixgbe_xmit_xdp_ring(adapter, xdpf);
1003010032
if (err != IXGBE_XDP_TX)
1003110033
return -ENOSPC;
1003210034

drivers/net/tun.c

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1301,18 +1301,13 @@ static const struct net_device_ops tun_netdev_ops = {
13011301
.ndo_get_stats64 = tun_net_get_stats64,
13021302
};
13031303

1304-
static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
1304+
static int tun_xdp_xmit(struct net_device *dev, struct xdp_frame *frame)
13051305
{
13061306
struct tun_struct *tun = netdev_priv(dev);
1307-
struct xdp_frame *frame;
13081307
struct tun_file *tfile;
13091308
u32 numqueues;
13101309
int ret = 0;
13111310

1312-
frame = convert_to_xdp_frame(xdp);
1313-
if (unlikely(!frame))
1314-
return -EOVERFLOW;
1315-
13161311
rcu_read_lock();
13171312

13181313
numqueues = READ_ONCE(tun->numqueues);
@@ -1336,6 +1331,16 @@ static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
13361331
return ret;
13371332
}
13381333

1334+
static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
1335+
{
1336+
struct xdp_frame *frame = convert_to_xdp_frame(xdp);
1337+
1338+
if (unlikely(!frame))
1339+
return -EOVERFLOW;
1340+
1341+
return tun_xdp_xmit(dev, frame);
1342+
}
1343+
13391344
static void tun_xdp_flush(struct net_device *dev)
13401345
{
13411346
struct tun_struct *tun = netdev_priv(dev);
@@ -1683,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
16831688
case XDP_TX:
16841689
get_page(alloc_frag->page);
16851690
alloc_frag->offset += buflen;
1686-
if (tun_xdp_xmit(tun->dev, &xdp))
1691+
if (tun_xdp_tx(tun->dev, &xdp))
16871692
goto err_redirect;
16881693
tun_xdp_flush(tun->dev);
16891694
rcu_read_unlock();

drivers/net/virtio_net.c

Lines changed: 14 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -416,10 +416,10 @@ static void virtnet_xdp_flush(struct net_device *dev)
416416
}
417417

418418
static int __virtnet_xdp_xmit(struct virtnet_info *vi,
419-
struct xdp_buff *xdp)
419+
struct xdp_frame *xdpf)
420420
{
421421
struct virtio_net_hdr_mrg_rxbuf *hdr;
422-
struct xdp_frame *xdpf, *xdpf_sent;
422+
struct xdp_frame *xdpf_sent;
423423
struct send_queue *sq;
424424
unsigned int len;
425425
unsigned int qp;
@@ -432,10 +432,6 @@ static int __virtnet_xdp_xmit(struct virtnet_info *vi,
432432
while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
433433
xdp_return_frame(xdpf_sent);
434434

435-
xdpf = convert_to_xdp_frame(xdp);
436-
if (unlikely(!xdpf))
437-
return -EOVERFLOW;
438-
439435
/* virtqueue want to use data area in-front of packet */
440436
if (unlikely(xdpf->metasize > 0))
441437
return -EOPNOTSUPP;
@@ -459,7 +455,7 @@ static int __virtnet_xdp_xmit(struct virtnet_info *vi,
459455
return 0;
460456
}
461457

462-
static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
458+
static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf)
463459
{
464460
struct virtnet_info *vi = netdev_priv(dev);
465461
struct receive_queue *rq = vi->rq;
@@ -472,7 +468,7 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
472468
if (!xdp_prog)
473469
return -ENXIO;
474470

475-
return __virtnet_xdp_xmit(vi, xdp);
471+
return __virtnet_xdp_xmit(vi, xdpf);
476472
}
477473

478474
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
@@ -569,6 +565,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
569565
xdp_prog = rcu_dereference(rq->xdp_prog);
570566
if (xdp_prog) {
571567
struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
568+
struct xdp_frame *xdpf;
572569
struct xdp_buff xdp;
573570
void *orig_data;
574571
u32 act;
@@ -611,7 +608,10 @@ static struct sk_buff *receive_small(struct net_device *dev,
611608
delta = orig_data - xdp.data;
612609
break;
613610
case XDP_TX:
614-
err = __virtnet_xdp_xmit(vi, &xdp);
611+
xdpf = convert_to_xdp_frame(&xdp);
612+
if (unlikely(!xdpf))
613+
goto err_xdp;
614+
err = __virtnet_xdp_xmit(vi, xdpf);
615615
if (unlikely(err)) {
616616
trace_xdp_exception(vi->dev, xdp_prog, act);
617617
goto err_xdp;
@@ -702,6 +702,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
702702
rcu_read_lock();
703703
xdp_prog = rcu_dereference(rq->xdp_prog);
704704
if (xdp_prog) {
705+
struct xdp_frame *xdpf;
705706
struct page *xdp_page;
706707
struct xdp_buff xdp;
707708
void *data;
@@ -766,7 +767,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
766767
}
767768
break;
768769
case XDP_TX:
769-
err = __virtnet_xdp_xmit(vi, &xdp);
770+
xdpf = convert_to_xdp_frame(&xdp);
771+
if (unlikely(!xdpf))
772+
goto err_xdp;
773+
err = __virtnet_xdp_xmit(vi, xdpf);
770774
if (unlikely(err)) {
771775
trace_xdp_exception(vi->dev, xdp_prog, act);
772776
if (unlikely(xdp_page != page))

include/linux/netdevice.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1165,7 +1165,7 @@ struct dev_ifalias {
11651165
* This function is used to set or query state related to XDP on the
11661166
* netdevice and manage BPF offload. See definition of
11671167
* enum bpf_netdev_command for details.
1168-
* int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp);
1168+
* int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_frame *xdp);
11691169
* This function is used to submit a XDP packet for transmit on a
11701170
* netdevice.
11711171
* void (*ndo_xdp_flush)(struct net_device *dev);
@@ -1356,7 +1356,7 @@ struct net_device_ops {
13561356
int (*ndo_bpf)(struct net_device *dev,
13571357
struct netdev_bpf *bpf);
13581358
int (*ndo_xdp_xmit)(struct net_device *dev,
1359-
struct xdp_buff *xdp);
1359+
struct xdp_frame *xdp);
13601360
void (*ndo_xdp_flush)(struct net_device *dev);
13611361
};
13621362

net/core/filter.c

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2749,13 +2749,18 @@ static int __bpf_tx_xdp(struct net_device *dev,
27492749
struct xdp_buff *xdp,
27502750
u32 index)
27512751
{
2752+
struct xdp_frame *xdpf;
27522753
int err;
27532754

27542755
if (!dev->netdev_ops->ndo_xdp_xmit) {
27552756
return -EOPNOTSUPP;
27562757
}
27572758

2758-
err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
2759+
xdpf = convert_to_xdp_frame(xdp);
2760+
if (unlikely(!xdpf))
2761+
return -EOVERFLOW;
2762+
2763+
err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
27592764
if (err)
27602765
return err;
27612766
dev->netdev_ops->ndo_xdp_flush(dev);
@@ -2771,11 +2776,19 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
27712776

27722777
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
27732778
struct net_device *dev = fwd;
2779+
struct xdp_frame *xdpf;
27742780

27752781
if (!dev->netdev_ops->ndo_xdp_xmit)
27762782
return -EOPNOTSUPP;
27772783

2778-
err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
2784+
xdpf = convert_to_xdp_frame(xdp);
2785+
if (unlikely(!xdpf))
2786+
return -EOVERFLOW;
2787+
2788+
/* TODO: move to inside map code instead, for bulk support
2789+
* err = dev_map_enqueue(dev, xdp);
2790+
*/
2791+
err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
27792792
if (err)
27802793
return err;
27812794
__dev_map_insert_ctx(map, index);

0 commit comments

Comments
 (0)