@@ -2214,7 +2214,7 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
				     struct xdp_buff *xdp)
 {
-	int result = I40E_XDP_PASS;
+	int err, result = I40E_XDP_PASS;
 	struct i40e_ring *xdp_ring;
 	struct bpf_prog *xdp_prog;
 	u32 act;
@@ -2233,6 +2233,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_ring(xdp, xdp_ring);
		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+		result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+		break;
	default:
		bpf_warn_invalid_xdp_action(act);
	case XDP_ABORTED:
@@ -2268,6 +2272,15 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
 #endif
 }
 
+static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
+{
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.
+	 */
+	wmb();
+	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
 /**
  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: rx descriptor ring to transact packets on
@@ -2402,16 +2415,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
	}
 
	if (xdp_xmit) {
-		struct i40e_ring *xdp_ring;
-
-		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+		struct i40e_ring *xdp_ring =
+			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch.
-		 */
-		wmb();
-
-		writel(xdp_ring->next_to_use, xdp_ring->tail);
+		i40e_xdp_ring_update_tail(xdp_ring);
+		xdp_do_flush_map();
	}
 
	rx_ring->skb = skb;
@@ -3659,3 +3667,49 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
	return i40e_xmit_frame_ring(skb, tx_ring);
 }
+
+/**
+ * i40e_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @xdp: XDP buffer
+ *
+ * Returns Zero if sent, else an error code
+ **/
+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int queue_index = smp_processor_id();
+	struct i40e_vsi *vsi = np->vsi;
+	int err;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return -ENETDOWN;
+
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+		return -ENXIO;
+
+	err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
+	if (err != I40E_XDP_TX)
+		return -ENOSPC;
+
+	return 0;
+}
+
+/**
+ * i40e_xdp_flush - Implements ndo_xdp_flush
+ * @dev: netdev
+ **/
+void i40e_xdp_flush(struct net_device *dev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	unsigned int queue_index = smp_processor_id();
+	struct i40e_vsi *vsi = np->vsi;
+
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
+		return;
+
+	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+		return;
+
+	i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
+}
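
Note: the two new callbacks above only take effect once they are assigned in the
driver's net_device_ops; that assignment is not part of the hunks shown here and
would live in i40e_main.c alongside the existing i40e_netdev_ops table. A minimal
sketch of the expected hookup (surrounding entries elided, not taken from this diff):

	static const struct net_device_ops i40e_netdev_ops = {
		/* ... existing callbacks unchanged ... */
		.ndo_xdp_xmit		= i40e_xdp_xmit,
		.ndo_xdp_flush		= i40e_xdp_flush,
	};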