Skip to content

Commit 4434572

Browse files
Octavian Purdila authored and davem330 committed
net: factorize sync-rcu call in unregister_netdevice_many
Add dev_close_many and dev_deactivate_many to factorize another
sync-rcu operation on the netdevice unregister path.

  $ modprobe dummy numdummies=10000
  $ ip link set dev dummy* up
  $ time rmmod dummy

          Without the patch    With the patch
  real    0m 24.63s            0m 5.15s
  user    0m 0.00s             0m 0.00s
  sys     0m 6.05s             0m 5.14s

Signed-off-by: Octavian Purdila <opurdila@ixiacom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent c6c8fea commit 4434572

File tree

3 files changed

+99
-49
lines changed

3 files changed

+99
-49
lines changed

include/net/sch_generic.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -321,6 +321,7 @@ extern void dev_init_scheduler(struct net_device *dev);
321321
extern void dev_shutdown(struct net_device *dev);
322322
extern void dev_activate(struct net_device *dev);
323323
extern void dev_deactivate(struct net_device *dev);
324+
extern void dev_deactivate_many(struct list_head *head);
324325
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
325326
struct Qdisc *qdisc);
326327
extern void qdisc_reset(struct Qdisc *qdisc);

net/core/dev.c

Lines changed: 76 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -1222,52 +1222,90 @@ int dev_open(struct net_device *dev)
12221222
}
12231223
EXPORT_SYMBOL(dev_open);
12241224

1225-
static int __dev_close(struct net_device *dev)
1225+
static int __dev_close_many(struct list_head *head)
12261226
{
1227-
const struct net_device_ops *ops = dev->netdev_ops;
1227+
struct net_device *dev;
12281228

12291229
ASSERT_RTNL();
12301230
might_sleep();
12311231

1232-
/*
1233-
* Tell people we are going down, so that they can
1234-
* prepare to death, when device is still operating.
1235-
*/
1236-
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1232+
list_for_each_entry(dev, head, unreg_list) {
1233+
/*
1234+
* Tell people we are going down, so that they can
1235+
* prepare to death, when device is still operating.
1236+
*/
1237+
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
12371238

1238-
clear_bit(__LINK_STATE_START, &dev->state);
1239+
clear_bit(__LINK_STATE_START, &dev->state);
12391240

1240-
/* Synchronize to scheduled poll. We cannot touch poll list,
1241-
* it can be even on different cpu. So just clear netif_running().
1242-
*
1243-
* dev->stop() will invoke napi_disable() on all of it's
1244-
* napi_struct instances on this device.
1245-
*/
1246-
smp_mb__after_clear_bit(); /* Commit netif_running(). */
1241+
/* Synchronize to scheduled poll. We cannot touch poll list, it
1242+
* can be even on different cpu. So just clear netif_running().
1243+
*
1244+
* dev->stop() will invoke napi_disable() on all of it's
1245+
* napi_struct instances on this device.
1246+
*/
1247+
smp_mb__after_clear_bit(); /* Commit netif_running(). */
1248+
}
12471249

1248-
dev_deactivate(dev);
1250+
dev_deactivate_many(head);
12491251

1250-
/*
1251-
* Call the device specific close. This cannot fail.
1252-
* Only if device is UP
1253-
*
1254-
* We allow it to be called even after a DETACH hot-plug
1255-
* event.
1256-
*/
1257-
if (ops->ndo_stop)
1258-
ops->ndo_stop(dev);
1252+
list_for_each_entry(dev, head, unreg_list) {
1253+
const struct net_device_ops *ops = dev->netdev_ops;
12591254

1260-
/*
1261-
* Device is now down.
1262-
*/
1255+
/*
1256+
* Call the device specific close. This cannot fail.
1257+
* Only if device is UP
1258+
*
1259+
* We allow it to be called even after a DETACH hot-plug
1260+
* event.
1261+
*/
1262+
if (ops->ndo_stop)
1263+
ops->ndo_stop(dev);
1264+
1265+
/*
1266+
* Device is now down.
1267+
*/
1268+
1269+
dev->flags &= ~IFF_UP;
1270+
1271+
/*
1272+
* Shutdown NET_DMA
1273+
*/
1274+
net_dmaengine_put();
1275+
}
12631276

1264-
dev->flags &= ~IFF_UP;
1277+
return 0;
1278+
}
1279+
1280+
static int __dev_close(struct net_device *dev)
1281+
{
1282+
LIST_HEAD(single);
1283+
1284+
list_add(&dev->unreg_list, &single);
1285+
return __dev_close_many(&single);
1286+
}
1287+
1288+
int dev_close_many(struct list_head *head)
1289+
{
1290+
struct net_device *dev, *tmp;
1291+
LIST_HEAD(tmp_list);
1292+
1293+
list_for_each_entry_safe(dev, tmp, head, unreg_list)
1294+
if (!(dev->flags & IFF_UP))
1295+
list_move(&dev->unreg_list, &tmp_list);
1296+
1297+
__dev_close_many(head);
12651298

12661299
/*
1267-
* Shutdown NET_DMA
1300+
* Tell people we are down
12681301
*/
1269-
net_dmaengine_put();
1302+
list_for_each_entry(dev, head, unreg_list) {
1303+
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1304+
call_netdevice_notifiers(NETDEV_DOWN, dev);
1305+
}
12701306

1307+
/* rollback_registered_many needs the complete original list */
1308+
list_splice(&tmp_list, head);
12711309
return 0;
12721310
}
12731311

@@ -1282,16 +1320,10 @@ static int __dev_close(struct net_device *dev)
12821320
*/
12831321
int dev_close(struct net_device *dev)
12841322
{
1285-
if (!(dev->flags & IFF_UP))
1286-
return 0;
1287-
1288-
__dev_close(dev);
1323+
LIST_HEAD(single);
12891324

1290-
/*
1291-
* Tell people we are down
1292-
*/
1293-
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1294-
call_netdevice_notifiers(NETDEV_DOWN, dev);
1325+
list_add(&dev->unreg_list, &single);
1326+
dev_close_many(&single);
12951327

12961328
return 0;
12971329
}
@@ -4963,10 +4995,12 @@ static void rollback_registered_many(struct list_head *head)
49634995
}
49644996

49654997
BUG_ON(dev->reg_state != NETREG_REGISTERED);
4998+
}
49664999

4967-
/* If device is running, close it first. */
4968-
dev_close(dev);
5000+
/* If device is running, close it first. */
5001+
dev_close_many(head);
49695002

5003+
list_for_each_entry(dev, head, unreg_list) {
49705004
/* And unlink it from device chain. */
49715005
unlist_netdevice(dev);
49725006

net/sched/sch_generic.c

Lines changed: 22 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -810,20 +810,35 @@ static bool some_qdisc_is_busy(struct net_device *dev)
810810
return false;
811811
}
812812

813-
void dev_deactivate(struct net_device *dev)
813+
void dev_deactivate_many(struct list_head *head)
814814
{
815-
netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
816-
if (dev_ingress_queue(dev))
817-
dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
815+
struct net_device *dev;
818816

819-
dev_watchdog_down(dev);
817+
list_for_each_entry(dev, head, unreg_list) {
818+
netdev_for_each_tx_queue(dev, dev_deactivate_queue,
819+
&noop_qdisc);
820+
if (dev_ingress_queue(dev))
821+
dev_deactivate_queue(dev, dev_ingress_queue(dev),
822+
&noop_qdisc);
823+
824+
dev_watchdog_down(dev);
825+
}
820826

821827
/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
822828
synchronize_rcu();
823829

824830
/* Wait for outstanding qdisc_run calls. */
825-
while (some_qdisc_is_busy(dev))
826-
yield();
831+
list_for_each_entry(dev, head, unreg_list)
832+
while (some_qdisc_is_busy(dev))
833+
yield();
834+
}
835+
836+
void dev_deactivate(struct net_device *dev)
837+
{
838+
LIST_HEAD(single);
839+
840+
list_add(&dev->unreg_list, &single);
841+
dev_deactivate_many(&single);
827842
}
828843

829844
static void dev_init_scheduler_queue(struct net_device *dev,

0 commit comments

Comments (0)