@@ -4843,6 +4843,14 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
4843
4843
return ret ;
4844
4844
}
4845
4845
4846
/* Deliver every skb on @head to __netif_receive_skb(), one at a time.
 * Uses the _safe iterator because each skb is consumed (and possibly
 * freed) by the receive path, so the next pointer must be cached before
 * the current entry is handed off.
 */
static void __netif_receive_skb_list(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list)
		__netif_receive_skb(skb);
}
4846
4854
static int netif_receive_skb_internal (struct sk_buff * skb )
4847
4855
{
4848
4856
int ret ;
@@ -4883,6 +4891,50 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
4883
4891
return ret ;
4884
4892
}
4885
4893
4894
/* List analogue of netif_receive_skb_internal(): run the pre-receive
 * pipeline (RX timestamping, generic XDP, RPS steering) over every skb
 * on @head.  Each stage unlinks from the list any skb it fully handles
 * or drops; whatever remains is delivered in bulk via
 * __netif_receive_skb_list().  Returns void — per-skb return values
 * would not be meaningful for a list.
 */
static void netif_receive_skb_list_internal(struct list_head *head)
{
	struct bpf_prog *xdp_prog = NULL;
	struct sk_buff *skb, *next;

	/* Stage 1: timestamping.  Skbs deferred for hardware RX
	 * timestamping are owned by that machinery now.
	 */
	list_for_each_entry_safe(skb, next, head, list) {
		net_timestamp_check(netdev_tstamp_prequeue, skb);
		if (skb_defer_rx_timestamp(skb))
			/* Handled, remove from list */
			list_del(&skb->list);
	}

	/* Stage 2: generic XDP.  preempt_disable() + rcu_read_lock()
	 * mirror the ordering used by the single-skb path; the per-device
	 * program is re-fetched for each skb since the list may span
	 * multiple devices.
	 */
	if (static_branch_unlikely(&generic_xdp_needed_key)) {
		preempt_disable();
		rcu_read_lock();
		list_for_each_entry_safe(skb, next, head, list) {
			xdp_prog = rcu_dereference(skb->dev->xdp_prog);
			if (do_xdp_generic(xdp_prog, skb) != XDP_PASS)
				/* Dropped, remove from list */
				list_del(&skb->list);
		}
		rcu_read_unlock();
		preempt_enable();
	}

	/* Stage 3: RPS steering, then bulk delivery of the leftovers —
	 * both under a single RCU read-side critical section.
	 */
	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		list_for_each_entry_safe(skb, next, head, list) {
			struct rps_dev_flow voidflow, *rflow = &voidflow;
			int cpu = get_rps_cpu(skb->dev, skb, &rflow);

			if (cpu >= 0) {
				/* Steered to another CPU's backlog queue. */
				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
				/* Handled, remove from list */
				list_del(&skb->list);
			}
		}
	}
#endif
	__netif_receive_skb_list(head);
	rcu_read_unlock();
}
4886
4938
/**
4887
4939
* netif_receive_skb - process receive buffer from network
4888
4940
* @skb: buffer to process
@@ -4910,20 +4962,19 @@ EXPORT_SYMBOL(netif_receive_skb);
4910
4962
* netif_receive_skb_list - process many receive buffers from network
4911
4963
* @head: list of skbs to process.
4912
4964
*
4913
- * For now, just calls netif_receive_skb() in a loop, ignoring the
4914
- * return value .
4965
+ * Since return value of netif_receive_skb() is normally ignored, and
4966
+ * wouldn't be meaningful for a list, this function returns void .
4915
4967
*
4916
4968
* This function may only be called from softirq context and interrupts
4917
4969
* should be enabled.
4918
4970
*/
4919
4971
void netif_receive_skb_list(struct list_head *head)
{
	struct sk_buff *skb;

	/* Trace every entry first: the internal pass below may unlink
	 * and consume skbs, after which they must not be touched.
	 */
	list_for_each_entry(skb, head, list)
		trace_netif_receive_skb_list_entry(skb);
	netif_receive_skb_list_internal(head);
}
EXPORT_SYMBOL(netif_receive_skb_list);
4929
4980
0 commit comments