@@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 	struct sk_buff *skb;
 	struct sk_buff *vf_skb;
 	struct netvsc_stats *rx_stats;
-	struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
 	u32 bytes_recvd = packet->total_data_buflen;
 	int ret = 0;
 
 	if (!net || net->reg_state != NETREG_REGISTERED)
 		return NVSP_STAT_FAIL;
 
-	if (READ_ONCE(netvsc_dev->vf_inject)) {
-		atomic_inc(&netvsc_dev->vf_use_cnt);
-		if (!READ_ONCE(netvsc_dev->vf_inject)) {
+	if (READ_ONCE(net_device_ctx->vf_inject)) {
+		atomic_inc(&net_device_ctx->vf_use_cnt);
+		if (!READ_ONCE(net_device_ctx->vf_inject)) {
 			/*
 			 * We raced; just move on.
 			 */
-			atomic_dec(&netvsc_dev->vf_use_cnt);
+			atomic_dec(&net_device_ctx->vf_use_cnt);
 			goto vf_injection_done;
 		}
 
@@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
 		 * the host). Deliver these via the VF interface
 		 * in the guest.
 		 */
-		vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
-					       csum_info, *data, vlan_tci);
+		vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
+					       packet, csum_info, *data,
+					       vlan_tci);
 		if (vf_skb != NULL) {
-			++netvsc_dev->vf_netdev->stats.rx_packets;
-			netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
+			++net_device_ctx->vf_netdev->stats.rx_packets;
+			net_device_ctx->vf_netdev->stats.rx_bytes +=
+				bytes_recvd;
 			netif_receive_skb(vf_skb);
 		} else {
 			++net->stats.rx_dropped;
 			ret = NVSP_STAT_FAIL;
 		}
-		atomic_dec(&netvsc_dev->vf_use_cnt);
+		atomic_dec(&net_device_ctx->vf_use_cnt);
 		return ret;
 	}
 
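The reworked receive path above gates VF delivery on fields that now live in net_device_context: it reads vf_inject with READ_ONCE(), takes a reference through vf_use_cnt, and then re-checks the flag before committing, so a racing disable never strands a reference. Below is a minimal userspace sketch of that reader-side logic; the names (try_vf_delivery and the globals) are stand-ins, and C11 atomics replace the kernel's READ_ONCE()/atomic_t API.

/* Sketch only: userspace analogue of the rx-path gating above.
 * vf_inject/vf_use_cnt mirror the net_device_context fields. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool vf_inject;   /* analogue of net_device_ctx->vf_inject */
static atomic_int  vf_use_cnt;  /* analogue of net_device_ctx->vf_use_cnt */

/* Returns true if the packet was handed to the (pretend) VF path. */
static bool try_vf_delivery(void)
{
	if (!atomic_load(&vf_inject))
		return false;                   /* VF path not active */

	atomic_fetch_add(&vf_use_cnt, 1);
	if (!atomic_load(&vf_inject)) {
		/* Raced with disable between check and increment:
		 * drop the reference and use the synthetic path. */
		atomic_fetch_sub(&vf_use_cnt, 1);
		return false;
	}

	printf("delivered via VF\n");   /* stands in for netif_receive_skb() */
	atomic_fetch_sub(&vf_use_cnt, 1);
	return true;
}

int main(void)
{
	atomic_store(&vf_inject, true);
	try_vf_delivery();              /* goes through the VF path */
	atomic_store(&vf_inject, false);
	try_vf_delivery();              /* falls back, like vf_injection_done */
	return 0;
}

The second call falls back once the flag is cleared, mirroring the goto vf_injection_done path in the hunk above.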
@@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev)
 	free_netdev(netdev);
 }
 
-static void netvsc_notify_peers(struct work_struct *wrk)
-{
-	struct garp_wrk *gwrk;
-
-	gwrk = container_of(wrk, struct garp_wrk, dwrk);
-
-	netdev_notify_peers(gwrk->netdev);
-
-	atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
-}
-
 static struct net_device *get_netvsc_net_device(char *mac)
 {
 	struct net_device *dev, *found = NULL;
@@ -1203,18 +1193,31 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
-	if (netvsc_dev == NULL)
+	if (!netvsc_dev || net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 
 	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
 	/*
 	 * Take a reference on the module.
 	 */
 	try_module_get(THIS_MODULE);
-	netvsc_dev->vf_netdev = vf_netdev;
+	net_device_ctx->vf_netdev = vf_netdev;
 	return NOTIFY_OK;
 }
 
+static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
+{
+	net_device_ctx->vf_inject = true;
+}
+
+static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
+{
+	net_device_ctx->vf_inject = false;
+
+	/* Wait for currently active users to drain out. */
+	while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
+		udelay(50);
+}
 
 static int netvsc_vf_up(struct net_device *vf_netdev)
 {
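netvsc_inject_disable() above is the writer-side half of the same scheme: clear vf_inject first, then poll vf_use_cnt until in-flight receivers drain, after which tearing down vf_netdev is safe (netvsc_unregister_vf() below relies on exactly this ordering). A hedged userspace sketch of that disable-then-drain ordering follows; the names are stand-ins and C11 atomics plus usleep() replace the kernel API (build with -pthread).

/* Sketch only: disable-then-drain ordering. A reader thread mimics the
 * rx path; main() mimics netvsc_inject_disable() and then clears the
 * (dummy) VF pointer, as netvsc_unregister_vf() does. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool vf_inject;
static atomic_int  vf_use_cnt;
static void *vf_netdev = &vf_netdev;    /* dummy non-NULL "device" */

static void *rx_loop(void *arg)
{
	(void)arg;
	while (atomic_load(&vf_inject)) {
		atomic_fetch_add(&vf_use_cnt, 1);
		if (atomic_load(&vf_inject) && vf_netdev)
			usleep(10);     /* pretend to deliver a packet */
		atomic_fetch_sub(&vf_use_cnt, 1);
	}
	return NULL;
}

static void inject_disable(void)
{
	atomic_store(&vf_inject, false);
	/* Wait for currently active users to drain out. */
	while (atomic_load(&vf_use_cnt) != 0)
		usleep(50);
}

int main(void)
{
	pthread_t rx;

	atomic_store(&vf_inject, true);
	pthread_create(&rx, NULL, rx_loop, NULL);
	usleep(1000);

	inject_disable();       /* no rx user holds a reference past this point */
	vf_netdev = NULL;       /* only now is tearing down the pointer safe */

	pthread_join(rx, NULL);
	printf("drained, use_cnt=%d\n", atomic_load(&vf_use_cnt));
	return 0;
}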
@@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
 
-	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+	if (!netvsc_dev || !net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 
 	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
-	netvsc_dev->vf_inject = true;
+	netvsc_inject_enable(net_device_ctx);
 
 	/*
 	 * Open the device before switching data path.
@@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
 
 	netif_carrier_off(ndev);
 
-	/*
-	 * Now notify peers. We are scheduling work to
-	 * notify peers; take a reference to prevent
-	 * the VF interface from vanishing.
-	 */
-	atomic_inc(&netvsc_dev->vf_use_cnt);
-	net_device_ctx->gwrk.netdev = vf_netdev;
-	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
-	schedule_work(&net_device_ctx->gwrk.dwrk);
+	/* Now notify peers through VF device. */
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
 
 	return NOTIFY_OK;
 }
@@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
 
-	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+	if (!netvsc_dev || !net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 
 	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
-	netvsc_dev->vf_inject = false;
-	/*
-	 * Wait for currently active users to
-	 * drain out.
-	 */
-
-	while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
-		udelay(50);
+	netvsc_inject_disable(net_device_ctx);
 	netvsc_switch_datapath(ndev, false);
 	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
 	rndis_filter_close(netvsc_dev);
 	netif_carrier_on(ndev);
-	/*
-	 * Notify peers.
-	 */
-	atomic_inc(&netvsc_dev->vf_use_cnt);
-	net_device_ctx->gwrk.netdev = ndev;
-	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
-	schedule_work(&net_device_ctx->gwrk.dwrk);
+
+	/* Now notify peers through netvsc device. */
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
 
 	return NOTIFY_OK;
 }
@@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
 
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = net_device_ctx->nvdev;
-	if (netvsc_dev == NULL)
+	if (!netvsc_dev || !net_device_ctx->vf_netdev)
 		return NOTIFY_DONE;
 	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
-
-	netvsc_dev->vf_netdev = NULL;
+	netvsc_inject_disable(net_device_ctx);
+	net_device_ctx->vf_netdev = NULL;
 	module_put(THIS_MODULE);
 	return NOTIFY_OK;
 }
@@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev,
 
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 	INIT_WORK(&net_device_ctx->work, do_set_multicast);
-	INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
 
 	spin_lock_init(&net_device_ctx->lock);
 	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
 
+	atomic_set(&net_device_ctx->vf_use_cnt, 0);
+	net_device_ctx->vf_netdev = NULL;
+	net_device_ctx->vf_inject = false;
+
 	net->netdev_ops = &device_ops;
 
 	net->hw_features = NETVSC_HW_FEATURES;
@@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this,
 {
 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
 
-	/* Avoid Vlan, Bonding dev with same MAC registering as VF */
-	if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING))
+	/* Avoid Vlan dev with same MAC registering as VF */
+	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+		return NOTIFY_DONE;
+
+	/* Avoid Bonding master dev with same MAC registering as VF */
+	if (event_dev->priv_flags & IFF_BONDING &&
+	    event_dev->flags & IFF_MASTER)
 		return NOTIFY_DONE;
 
 	switch (event) {