@@ -4608,7 +4608,8 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
         return 0;
 }
 
-static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
+                                    struct packet_type **ppt_prev)
 {
         struct packet_type *ptype, *pt_prev;
         rx_handler_func_t *rx_handler;
@@ -4738,8 +4739,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
         if (pt_prev) {
                 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
                         goto drop;
-                else
-                        ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+                *ppt_prev = pt_prev;
         } else {
 drop:
                 if (!deliver_exact)
@@ -4757,6 +4757,18 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
         return ret;
 }
 
+static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
+{
+        struct net_device *orig_dev = skb->dev;
+        struct packet_type *pt_prev = NULL;
+        int ret;
+
+        ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+        if (pt_prev)
+                ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+        return ret;
+}
+
 /**
  *      netif_receive_skb_core - special purpose version of netif_receive_skb
  *      @skb: buffer to process
@@ -4777,19 +4789,63 @@ int netif_receive_skb_core(struct sk_buff *skb)
         int ret;
 
         rcu_read_lock();
-        ret = __netif_receive_skb_core(skb, false);
+        ret = __netif_receive_skb_one_core(skb, false);
         rcu_read_unlock();
 
         return ret;
 }
 EXPORT_SYMBOL(netif_receive_skb_core);
 
-static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
+static inline void __netif_receive_skb_list_ptype(struct list_head *head,
+                                                  struct packet_type *pt_prev,
+                                                  struct net_device *orig_dev)
 {
         struct sk_buff *skb, *next;
 
+        if (!pt_prev)
+                return;
+        if (list_empty(head))
+                return;
+
         list_for_each_entry_safe(skb, next, head, list)
-                __netif_receive_skb_core(skb, pfmemalloc);
+                pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+}
+
+static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
+{
+        /* Fast-path assumptions:
+         * - There is no RX handler.
+         * - Only one packet_type matches.
+         * If either of these fails, we will end up doing some per-packet
+         * processing in-line, then handling the 'last ptype' for the whole
+         * sublist.  This can't cause out-of-order delivery to any single ptype,
+         * because the 'last ptype' must be constant across the sublist, and all
+         * other ptypes are handled per-packet.
+         */
+        /* Current (common) ptype of sublist */
+        struct packet_type *pt_curr = NULL;
+        /* Current (common) orig_dev of sublist */
+        struct net_device *od_curr = NULL;
+        struct list_head sublist;
+        struct sk_buff *skb, *next;
+
+        list_for_each_entry_safe(skb, next, head, list) {
+                struct net_device *orig_dev = skb->dev;
+                struct packet_type *pt_prev = NULL;
+
+                __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+                if (pt_curr != pt_prev || od_curr != orig_dev) {
+                        /* dispatch old sublist */
+                        list_cut_before(&sublist, head, &skb->list);
+                        __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
+                        /* start new sublist */
+                        pt_curr = pt_prev;
+                        od_curr = orig_dev;
+                }
+        }
+
+        /* dispatch final sublist */
+        __netif_receive_skb_list_ptype(head, pt_curr, od_curr);
 }
 
 static int __netif_receive_skb(struct sk_buff *skb)
@@ -4809,10 +4865,10 @@ static int __netif_receive_skb(struct sk_buff *skb)
                  * context down to all allocation sites.
                  */
                 noreclaim_flag = memalloc_noreclaim_save();
-                ret = __netif_receive_skb_core(skb, true);
+                ret = __netif_receive_skb_one_core(skb, true);
                 memalloc_noreclaim_restore(noreclaim_flag);
         } else
-                ret = __netif_receive_skb_core(skb, false);
+                ret = __netif_receive_skb_one_core(skb, false);
 
         return ret;
 }
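
Note on the batching scheme in __netif_receive_skb_list_core above: packets are walked in order, and as long as consecutive skbs resolve to the same (packet_type, orig_dev) pair they stay queued at the front of the list; when the pair changes, list_cut_before() splits everything before the current skb into a sublist that is delivered to the previous ptype in a single pass. The standalone userspace program below is a sketch of that pattern under simplified assumptions: the list helpers imitate the semantics of the kernel's list_head and list_cut_before() but are re-implemented here, and struct pkt, its integer target field (standing in for the (pt_prev, orig_dev) pair), and deliver_sublist() are hypothetical names, not kernel APIs.

/* Userspace sketch only -- simplified stand-ins, not <linux/list.h>. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h)
{
        h->prev = h->next = h;
}

static int list_empty(const struct list_head *h)
{
        return h->next == h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

/* Move [head->next, entry) from @head onto @list, like list_cut_before(). */
static void list_cut_before(struct list_head *list, struct list_head *head,
                            struct list_head *entry)
{
        if (head->next == entry) {      /* nothing precedes @entry */
                list_init(list);
                return;
        }
        list->next = head->next;
        list->next->prev = list;
        list->prev = entry->prev;
        list->prev->next = list;
        head->next = entry;
        entry->prev = head;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pkt {
        int id;
        int target;             /* stands in for the (pt_prev, orig_dev) pair */
        struct list_head list;
};

/* Stand-in for __netif_receive_skb_list_ptype(): deliver a whole sublist. */
static void deliver_sublist(struct list_head *head, int target)
{
        struct list_head *p;

        if (target < 0 || list_empty(head))
                return;
        for (p = head->next; p != head; p = p->next)
                printf("target %d <- packet %d\n", target,
                       container_of(p, struct pkt, list)->id);
}

int main(void)
{
        struct pkt pkts[] = {
                {0, 1}, {1, 1}, {2, 2}, {3, 2}, {4, 2}, {5, 1},
        };
        struct list_head head, sublist, *p, *n;
        int cur = -1;           /* current common target of the open sublist */
        size_t i;

        list_init(&head);
        for (i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++)
                list_add_tail(&pkts[i].list, &head);

        /* Same shape as the main loop of __netif_receive_skb_list_core(). */
        for (p = head.next, n = p->next; p != &head; p = n, n = p->next) {
                struct pkt *pkt = container_of(p, struct pkt, list);

                if (pkt->target != cur) {
                        /* dispatch old sublist: everything before this packet */
                        list_cut_before(&sublist, &head, p);
                        deliver_sublist(&sublist, cur);
                        /* start new sublist */
                        cur = pkt->target;
                }
        }
        /* dispatch final sublist: whatever is still queued on head */
        deliver_sublist(&head, cur);
        return 0;
}

Running it delivers packets 0 and 1 as one batch to target 1, packets 2 through 4 as one batch to target 2, and packet 5 as a final batch to target 1, mirroring how the kernel loop keeps per-ptype ordering while still dispatching runs of like packets together.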