Commit 45c9149

Merge branch 'team' ("add support for peer notifications and igmp rejoins for team")

Jiri Pirko says:

====================
The middle patch adjusts core infrastructure so the bonding code can be
generalized and reused by team.

v1->v2: using msecs_to_jiffies() as suggested by Eric

Jiri Pirko (3):
  team: add peer notification
  net: convert resend IGMP to notifier event
  team: add support for sending multicast rejoins
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents ab2cfbb + 492b200 commit 45c9149
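
The series hinges on one new notifier event: instead of bonding calling IGMP helpers directly, a master device now emits NETDEV_RESEND_IGMP and lets interested parties react. Note that the diffs below also drop the ip_mc_rejoin_groups() declaration from include/linux/igmp.h, so the handler that actually performs the rejoin has to live inside net/ipv4 itself (that file is not among the diffs shown in this view). The following is only a sketch of what such a consumer looks like; the function and variable names are illustrative, not the in-tree ones:

/* Sketch of an IPv4-side consumer of NETDEV_RESEND_IGMP. Illustrative
 * names only; the real handler lives in net/ipv4 and is not shown in
 * this diff view.
 */
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>

static int resend_igmp_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct in_device *in_dev;

	if (event != NETDEV_RESEND_IGMP)
		return NOTIFY_DONE;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev)
		ip_mc_rejoin_groups(in_dev);	/* re-send IGMP JOINs */
	rcu_read_unlock();

	return NOTIFY_DONE;
}

static struct notifier_block resend_igmp_nb = {
	.notifier_call = resend_igmp_event,
};
/* registered once via register_netdevice_notifier(&resend_igmp_nb) */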

8 files changed: 245 additions, 43 deletions

drivers/net/bonding/bond_main.c

Lines changed: 9 additions & 35 deletions
@@ -715,49 +715,19 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
 	return err;
 }
 
-static void __bond_resend_igmp_join_requests(struct net_device *dev)
-{
-	struct in_device *in_dev;
-
-	in_dev = __in_dev_get_rcu(dev);
-	if (in_dev)
-		ip_mc_rejoin_groups(in_dev);
-}
-
 /*
  * Retrieve the list of registered multicast addresses for the bonding
  * device and retransmit an IGMP JOIN request to the current active
  * slave.
  */
 static void bond_resend_igmp_join_requests(struct bonding *bond)
 {
-	struct net_device *bond_dev, *vlan_dev, *upper_dev;
-	struct vlan_entry *vlan;
-
-	read_lock(&bond->lock);
-	rcu_read_lock();
-
-	bond_dev = bond->dev;
-
-	/* rejoin all groups on bond device */
-	__bond_resend_igmp_join_requests(bond_dev);
-
-	/*
-	 * if bond is enslaved to a bridge,
-	 * then rejoin all groups on its master
-	 */
-	upper_dev = netdev_master_upper_dev_get_rcu(bond_dev);
-	if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE)
-		__bond_resend_igmp_join_requests(upper_dev);
-
-	/* rejoin all groups on vlan devices */
-	list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-		vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
-						vlan->vlan_id);
-		if (vlan_dev)
-			__bond_resend_igmp_join_requests(vlan_dev);
+	if (!rtnl_trylock()) {
+		queue_delayed_work(bond->wq, &bond->mcast_work, 0);
+		return;
 	}
-	rcu_read_unlock();
+	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
+	rtnl_unlock();
 
 	/* We use curr_slave_lock to protect against concurrent access to
 	 * igmp_retrans from multiple running instances of this function and
@@ -3234,6 +3204,10 @@ static int bond_slave_netdev_event(unsigned long event,
 	case NETDEV_FEAT_CHANGE:
 		bond_compute_features(bond);
 		break;
+	case NETDEV_RESEND_IGMP:
+		/* Propagate to master device */
+		call_netdevice_notifiers(event, slave->bond->dev);
+		break;
 	default:
 		break;
 	}
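
Worth calling out in the rewritten bond_resend_igmp_join_requests() above is the rtnl_trylock() dance, which the team code below repeats: notifier chains must be invoked under RTNL, but a work item must never block on rtnl_lock(), since the RTNL holder may itself be waiting to cancel that work (a classic ABBA deadlock). The work therefore takes the lock opportunistically and requeues itself with zero delay on failure. A stripped-down sketch of the idiom, with my_dev and my_dw as placeholder names:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

static struct net_device *my_dev;	/* placeholder target device */
static struct delayed_work my_dw;	/* INIT_DELAYED_WORK'd elsewhere */

static void my_notify_work(struct work_struct *work)
{
	if (!rtnl_trylock()) {
		/* RTNL busy: retry from the workqueue instead of
		 * blocking, so a cancel under RTNL cannot deadlock. */
		schedule_delayed_work(&my_dw, 0);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, my_dev);
	rtnl_unlock();
}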

drivers/net/team/team.c

Lines changed: 176 additions & 0 deletions
@@ -622,6 +622,86 @@ static int team_change_mode(struct team *team, const char *kind)
 }
 
 
+/*********************
+ * Peers notification
+ *********************/
+
+static void team_notify_peers_work(struct work_struct *work)
+{
+	struct team *team;
+
+	team = container_of(work, struct team, notify_peers.dw.work);
+
+	if (!rtnl_trylock()) {
+		schedule_delayed_work(&team->notify_peers.dw, 0);
+		return;
+	}
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+	rtnl_unlock();
+	if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+		schedule_delayed_work(&team->notify_peers.dw,
+				      msecs_to_jiffies(team->notify_peers.interval));
+}
+
+static void team_notify_peers(struct team *team)
+{
+	if (!team->notify_peers.count || !netif_running(team->dev))
+		return;
+	atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
+	schedule_delayed_work(&team->notify_peers.dw, 0);
+}
+
+static void team_notify_peers_init(struct team *team)
+{
+	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
+}
+
+static void team_notify_peers_fini(struct team *team)
+{
+	cancel_delayed_work_sync(&team->notify_peers.dw);
+}
+
+
+/*******************************
+ * Send multicast group rejoins
+ *******************************/
+
+static void team_mcast_rejoin_work(struct work_struct *work)
+{
+	struct team *team;
+
+	team = container_of(work, struct team, mcast_rejoin.dw.work);
+
+	if (!rtnl_trylock()) {
+		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+		return;
+	}
+	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
+	rtnl_unlock();
+	if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+		schedule_delayed_work(&team->mcast_rejoin.dw,
+				      msecs_to_jiffies(team->mcast_rejoin.interval));
+}
+
+static void team_mcast_rejoin(struct team *team)
+{
+	if (!team->mcast_rejoin.count || !netif_running(team->dev))
+		return;
+	atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
+	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+}
+
+static void team_mcast_rejoin_init(struct team *team)
+{
+	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
+}
+
+static void team_mcast_rejoin_fini(struct team *team)
+{
+	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
+}
+
+
 /************************
  * Rx path frame handler
  ************************/
@@ -846,6 +926,8 @@ static void team_port_enable(struct team *team,
 	team_queue_override_port_add(team, port);
 	if (team->ops.port_enabled)
 		team->ops.port_enabled(team, port);
+	team_notify_peers(team);
+	team_mcast_rejoin(team);
 }
 
 static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -875,6 +957,8 @@ static void team_port_disable(struct team *team,
 	team->en_port_count--;
 	team_queue_override_port_del(team, port);
 	team_adjust_ops(team);
+	team_notify_peers(team);
+	team_mcast_rejoin(team);
 }
 
 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -1205,6 +1289,62 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
 	return team_change_mode(team, ctx->data.str_val);
 }
 
+static int team_notify_peers_count_get(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->notify_peers.count;
+	return 0;
+}
+
+static int team_notify_peers_count_set(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	team->notify_peers.count = ctx->data.u32_val;
+	return 0;
+}
+
+static int team_notify_peers_interval_get(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->notify_peers.interval;
+	return 0;
+}
+
+static int team_notify_peers_interval_set(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	team->notify_peers.interval = ctx->data.u32_val;
+	return 0;
+}
+
+static int team_mcast_rejoin_count_get(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->mcast_rejoin.count;
+	return 0;
+}
+
+static int team_mcast_rejoin_count_set(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	team->mcast_rejoin.count = ctx->data.u32_val;
+	return 0;
+}
+
+static int team_mcast_rejoin_interval_get(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->mcast_rejoin.interval;
+	return 0;
+}
+
+static int team_mcast_rejoin_interval_set(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	team->mcast_rejoin.interval = ctx->data.u32_val;
+	return 0;
+}
+
 static int team_port_en_option_get(struct team *team,
 				   struct team_gsetter_ctx *ctx)
 {
@@ -1316,6 +1456,30 @@ static const struct team_option team_options[] = {
 		.getter = team_mode_option_get,
 		.setter = team_mode_option_set,
 	},
+	{
+		.name = "notify_peers_count",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_notify_peers_count_get,
+		.setter = team_notify_peers_count_set,
+	},
+	{
+		.name = "notify_peers_interval",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_notify_peers_interval_get,
+		.setter = team_notify_peers_interval_set,
+	},
+	{
+		.name = "mcast_rejoin_count",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_mcast_rejoin_count_get,
+		.setter = team_mcast_rejoin_count_set,
+	},
+	{
+		.name = "mcast_rejoin_interval",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_mcast_rejoin_interval_get,
+		.setter = team_mcast_rejoin_interval_set,
+	},
 	{
 		.name = "enabled",
 		.type = TEAM_OPTION_TYPE_BOOL,
@@ -1396,6 +1560,10 @@ static int team_init(struct net_device *dev)
 
 	INIT_LIST_HEAD(&team->option_list);
 	INIT_LIST_HEAD(&team->option_inst_list);
+
+	team_notify_peers_init(team);
+	team_mcast_rejoin_init(team);
+
 	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
 	if (err)
 		goto err_options_register;
@@ -1406,6 +1574,8 @@ static int team_init(struct net_device *dev)
 	return 0;
 
 err_options_register:
+	team_mcast_rejoin_fini(team);
+	team_notify_peers_fini(team);
 	team_queue_override_fini(team);
 err_team_queue_override_init:
 	free_percpu(team->pcpu_stats);
@@ -1425,6 +1595,8 @@ static void team_uninit(struct net_device *dev)
 
 	__team_change_mode(team, NULL); /* cleanup */
 	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+	team_mcast_rejoin_fini(team);
+	team_notify_peers_fini(team);
 	team_queue_override_fini(team);
 	mutex_unlock(&team->lock);
 }
@@ -2698,6 +2870,10 @@ static int team_device_event(struct notifier_block *unused,
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid to change type of underlaying device */
 		return NOTIFY_BAD;
+	case NETDEV_RESEND_IGMP:
+		/* Propagate to master device */
+		call_netdevice_notifiers(event, port->team->dev);
+		break;
 	}
 	return NOTIFY_DONE;
 }
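
The count/interval pair drives both features identically: triggering loads count into count_pending and queues the work immediately; each invocation fires the notifier, decrements count_pending, and reschedules itself interval milliseconds out until the counter hits zero (a failed rtnl_trylock() requeues without decrementing, so no notification is lost). A worked trace, assuming notify_peers_count = 3 and notify_peers_interval = 100:

/* Assumed configuration: notify_peers.count = 3, interval = 100 ms.
 *
 * team_notify_peers(team)
 *	count_pending = 3, work queued with zero delay
 * team_notify_peers_work()  t =   0 ms  notifier fires, 3 -> 2, requeue +100 ms
 * team_notify_peers_work()  t ~ 100 ms  notifier fires, 2 -> 1, requeue +100 ms
 * team_notify_peers_work()  t ~ 200 ms  notifier fires, 1 -> 0, done
 */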

include/linux/if_team.h

Lines changed: 13 additions & 1 deletion
@@ -10,9 +10,9 @@
 #ifndef _LINUX_IF_TEAM_H_
 #define _LINUX_IF_TEAM_H_
 
-
 #include <linux/netpoll.h>
 #include <net/sch_generic.h>
+#include <linux/types.h>
 #include <uapi/linux/if_team.h>
 
 struct team_pcpu_stats {
@@ -194,6 +194,18 @@ struct team {
 	bool user_carrier_enabled;
 	bool queue_override_enabled;
 	struct list_head *qom_lists; /* array of queue override mapping lists */
+	struct {
+		unsigned int count;
+		unsigned int interval; /* in ms */
+		atomic_t count_pending;
+		struct delayed_work dw;
+	} notify_peers;
+	struct {
+		unsigned int count;
+		unsigned int interval; /* in ms */
+		atomic_t count_pending;
+		struct delayed_work dw;
+	} mcast_rejoin;
 	long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
include/linux/igmp.h

Lines changed: 0 additions & 1 deletion
@@ -129,6 +129,5 @@ extern void ip_mc_unmap(struct in_device *);
 extern void ip_mc_remap(struct in_device *);
 extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
 extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
-extern void ip_mc_rejoin_groups(struct in_device *in_dev);
 
 #endif

include/linux/netdevice.h

Lines changed: 1 addition & 0 deletions
@@ -1633,6 +1633,7 @@ struct packet_offload {
 #define NETDEV_NOTIFY_PEERS	0x0013
 #define NETDEV_JOIN		0x0014
 #define NETDEV_CHANGEUPPER	0x0015
+#define NETDEV_RESEND_IGMP	0x0016
 
 extern int register_netdevice_notifier(struct notifier_block *nb);
 extern int unregister_netdevice_notifier(struct notifier_block *nb);
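
On the producer side, any virtual driver that silently reroutes traffic (failover, port enable/disable) can now request a rejoin with one call; the vlan and bridge handlers below take care of propagating it across stacked devices. A hedged sketch, where my_failover_done() is a hypothetical driver callback rather than anything in this series:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void my_failover_done(struct net_device *dev)
{
	ASSERT_RTNL();	/* the notifier chain expects RTNL held */
	/* Ask the stack (and stacked devices) to re-send IGMP joins
	 * out of the newly active path. */
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}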

net/8021q/vlan.c

Lines changed: 1 addition & 0 deletions
@@ -459,6 +459,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
 	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_BONDING_FAILOVER:
+	case NETDEV_RESEND_IGMP:
 		/* Propagate to vlan devices */
 		vlan_group_for_each_dev(grp, i, vlandev)
 			call_netdevice_notifiers(event, vlandev);

net/bridge/br_notify.c

Lines changed: 5 additions & 0 deletions
@@ -102,6 +102,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid underlaying device to change its type. */
 		return NOTIFY_BAD;
+
+	case NETDEV_RESEND_IGMP:
+		/* Propagate to master device */
+		call_netdevice_notifiers(event, br->dev);
+		break;
 	}
 
 	/* Events that may cause spanning tree to refresh */
