@@ -3448,6 +3448,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	local_irq_save(flags);
 
 	rps_lock(sd);
+	if (!netif_running(skb->dev))
+		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
 		if (qlen) {
@@ -3469,6 +3471,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 		goto enqueue;
 	}
 
+drop:
 	sd->dropped++;
 	rps_unlock(sd);
 
@@ -3771,8 +3774,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
 	pt_prev = NULL;
 
-	rcu_read_lock();
-
 another_round:
 	skb->skb_iif = skb->dev->ifindex;
 
@@ -3782,7 +3783,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
 		skb = skb_vlan_untag(skb);
 		if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -3812,10 +3813,10 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 	if (static_key_false(&ingress_needed)) {
 		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
 		if (!skb)
-			goto unlock;
+			goto out;
 
 		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
-			goto unlock;
+			goto out;
 	}
 #endif
 #ifdef CONFIG_NET_CLS_ACT
@@ -3833,7 +3834,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 		if (vlan_do_receive(&skb))
 			goto another_round;
 		else if (unlikely(!skb))
-			goto unlock;
+			goto out;
 	}
 
 	rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -3845,7 +3846,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 		switch (rx_handler(&skb)) {
 		case RX_HANDLER_CONSUMED:
 			ret = NET_RX_SUCCESS;
-			goto unlock;
+			goto out;
 		case RX_HANDLER_ANOTHER:
 			goto another_round;
 		case RX_HANDLER_EXACT:
@@ -3899,8 +3900,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 		ret = NET_RX_DROP;
 	}
 
-unlock:
-	rcu_read_unlock();
+out:
 	return ret;
 }
 
@@ -3931,29 +3931,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
 
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
+	int ret;
+
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	if (skb_defer_rx_timestamp(skb))
 		return NET_RX_SUCCESS;
 
+	rcu_read_lock();
+
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
-		int cpu, ret;
-
-		rcu_read_lock();
-
-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
+		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 		if (cpu >= 0) {
 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			rcu_read_unlock();
 			return ret;
 		}
-		rcu_read_unlock();
 	}
 #endif
-	return __netif_receive_skb(skb);
+	ret = __netif_receive_skb(skb);
+	rcu_read_unlock();
+	return ret;
 }
 
 /**
@@ -4498,8 +4499,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		struct sk_buff *skb;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			rcu_read_lock();
 			local_irq_enable();
 			__netif_receive_skb(skb);
+			rcu_read_unlock();
 			local_irq_disable();
 			input_queue_head_incr(sd);
 			if (++work >= quota) {
@@ -6135,6 +6138,7 @@ static void rollback_registered_many(struct list_head *head)
 		unlist_netdevice(dev);
 
 		dev->reg_state = NETREG_UNREGISTERING;
+		on_each_cpu(flush_backlog, dev, 1);
 	}
 
 	synchronize_net();
@@ -6770,8 +6774,6 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		on_each_cpu(flush_backlog, dev, 1);
-
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */