Skip to content

Commit d9314c4

Browse files
Björn Töpel authored and Jeff Kirsher committed
i40e: add support for XDP_REDIRECT
The driver now acts upon the XDP_REDIRECT return action. Two new ndos are implemented, ndo_xdp_xmit and ndo_xdp_flush. XDP_REDIRECT action enables XDP program to redirect frames to other netdevs. Signed-off-by: Björn Töpel <bjorn.topel@intel.com> Tested-by: Andrew Bowers <andrewx.bowers@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
1 parent 8ce29c6 commit d9314c4

File tree

3 files changed

+68
-10
lines changed

3 files changed

+68
-10
lines changed

drivers/net/ethernet/intel/i40e/i40e_main.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11815,6 +11815,8 @@ static const struct net_device_ops i40e_netdev_ops = {
1181511815
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
1181611816
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
1181711817
.ndo_bpf = i40e_xdp,
11818+
.ndo_xdp_xmit = i40e_xdp_xmit,
11819+
.ndo_xdp_flush = i40e_xdp_flush,
1181811820
};
1181911821

1182011822
/**

drivers/net/ethernet/intel/i40e/i40e_txrx.c

Lines changed: 64 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2214,7 +2214,7 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
22142214
static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
22152215
struct xdp_buff *xdp)
22162216
{
2217-
int result = I40E_XDP_PASS;
2217+
int err, result = I40E_XDP_PASS;
22182218
struct i40e_ring *xdp_ring;
22192219
struct bpf_prog *xdp_prog;
22202220
u32 act;
@@ -2233,6 +2233,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
22332233
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
22342234
result = i40e_xmit_xdp_ring(xdp, xdp_ring);
22352235
break;
2236+
case XDP_REDIRECT:
2237+
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2238+
result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
2239+
break;
22362240
default:
22372241
bpf_warn_invalid_xdp_action(act);
22382242
case XDP_ABORTED:
@@ -2268,6 +2272,15 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
22682272
#endif
22692273
}
22702274

2275+
static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2276+
{
2277+
/* Force memory writes to complete before letting h/w
2278+
* know there are new descriptors to fetch.
2279+
*/
2280+
wmb();
2281+
writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2282+
}
2283+
22712284
/**
22722285
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
22732286
* @rx_ring: rx descriptor ring to transact packets on
@@ -2402,16 +2415,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
24022415
}
24032416

24042417
if (xdp_xmit) {
2405-
struct i40e_ring *xdp_ring;
2406-
2407-
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2418+
struct i40e_ring *xdp_ring =
2419+
rx_ring->vsi->xdp_rings[rx_ring->queue_index];
24082420

2409-
/* Force memory writes to complete before letting h/w
2410-
* know there are new descriptors to fetch.
2411-
*/
2412-
wmb();
2413-
2414-
writel(xdp_ring->next_to_use, xdp_ring->tail);
2421+
i40e_xdp_ring_update_tail(xdp_ring);
2422+
xdp_do_flush_map();
24152423
}
24162424

24172425
rx_ring->skb = skb;
@@ -3659,3 +3667,49 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
36593667

36603668
return i40e_xmit_frame_ring(skb, tx_ring);
36613669
}
3670+
3671+
/**
3672+
* i40e_xdp_xmit - Implements ndo_xdp_xmit
3673+
* @dev: netdev
3674+
* @xdp: XDP buffer
3675+
*
3676+
* Returns Zero if sent, else an error code
3677+
**/
3678+
int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
3679+
{
3680+
struct i40e_netdev_priv *np = netdev_priv(dev);
3681+
unsigned int queue_index = smp_processor_id();
3682+
struct i40e_vsi *vsi = np->vsi;
3683+
int err;
3684+
3685+
if (test_bit(__I40E_VSI_DOWN, vsi->state))
3686+
return -ENETDOWN;
3687+
3688+
if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
3689+
return -ENXIO;
3690+
3691+
err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
3692+
if (err != I40E_XDP_TX)
3693+
return -ENOSPC;
3694+
3695+
return 0;
3696+
}
3697+
3698+
/**
3699+
* i40e_xdp_flush - Implements ndo_xdp_flush
3700+
* @dev: netdev
3701+
**/
3702+
void i40e_xdp_flush(struct net_device *dev)
3703+
{
3704+
struct i40e_netdev_priv *np = netdev_priv(dev);
3705+
unsigned int queue_index = smp_processor_id();
3706+
struct i40e_vsi *vsi = np->vsi;
3707+
3708+
if (test_bit(__I40E_VSI_DOWN, vsi->state))
3709+
return;
3710+
3711+
if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
3712+
return;
3713+
3714+
i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
3715+
}

drivers/net/ethernet/intel/i40e/i40e_txrx.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -510,6 +510,8 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
510510
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
511511
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
512512
bool __i40e_chk_linearize(struct sk_buff *skb);
513+
int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp);
514+
void i40e_xdp_flush(struct net_device *dev);
513515

514516
/**
515517
* i40e_get_head - Retrieve head from head writeback

0 commit comments

Comments
 (0)