Commit 0d65fc1

jpirko authored and davem330 committed
mlxsw: spectrum: Implement LAG port join/leave
Implement basic procedures for joining/leaving port to/from LAG. That
includes HW setup of collector, core LAG mapping setup.

Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 3b71571 commit 0d65fc1

File tree

2 files changed: +306 −17 lines

drivers/net/ethernet/mellanox/mlxsw/spectrum.c

Lines changed: 276 additions & 12 deletions
@@ -1712,6 +1712,22 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
         return 0;
 }
 
+static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
+{
+        char slcr_pl[MLXSW_REG_SLCR_LEN];
+
+        mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
+                                     MLXSW_REG_SLCR_LAG_HASH_DMAC |
+                                     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
+                                     MLXSW_REG_SLCR_LAG_HASH_VLANID |
+                                     MLXSW_REG_SLCR_LAG_HASH_SIP |
+                                     MLXSW_REG_SLCR_LAG_HASH_DIP |
+                                     MLXSW_REG_SLCR_LAG_HASH_SPORT |
+                                     MLXSW_REG_SLCR_LAG_HASH_DPORT |
+                                     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
+        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+}
+
 static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
                          const struct mlxsw_bus_info *mlxsw_bus_info)
 {
@@ -1757,6 +1773,12 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
                 goto err_buffers_init;
         }
 
+        err = mlxsw_sp_lag_init(mlxsw_sp);
+        if (err) {
+                dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
+                goto err_lag_init;
+        }
+
         err = mlxsw_sp_switchdev_init(mlxsw_sp);
         if (err) {
                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
@@ -1766,6 +1788,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
         return 0;
 
 err_switchdev_init:
+err_lag_init:
 err_buffers_init:
 err_flood_init:
         mlxsw_sp_traps_fini(mlxsw_sp);
@@ -1793,9 +1816,9 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
         .used_max_vepa_channels         = 1,
         .max_vepa_channels              = 0,
         .used_max_lag                   = 1,
-        .max_lag                        = 64,
+        .max_lag                        = MLXSW_SP_LAG_MAX,
         .used_max_port_per_lag          = 1,
-        .max_port_per_lag               = 16,
+        .max_port_per_lag               = MLXSW_SP_PORT_PER_LAG_MAX,
         .used_max_mid                   = 1,
         .max_mid                        = 7000,
         .used_max_pgt                   = 1,
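The MLXSW_SP_LAG_MAX and MLXSW_SP_PORT_PER_LAG_MAX macros referenced above are not defined in this file; they presumably come from the driver's companion header (the second file changed by this commit, not shown in this excerpt). A minimal sketch of the assumed definitions, with the values taken from the literals they replace:

/* Assumed location and form; the values 64 and 16 mirror the replaced literals. */
#define MLXSW_SP_LAG_MAX                64
#define MLXSW_SP_PORT_PER_LAG_MAX       16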
@@ -1894,36 +1917,229 @@ static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
         mlxsw_sp->master_bridge.dev = NULL;
 }
 
-static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
-                                    unsigned long event, void *ptr)
+static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+{
+        char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+        mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
+        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+{
+        char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+        mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
+        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     u16 lag_id, u8 port_index)
+{
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+        mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
+                                      lag_id, port_index);
+        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
+                                        u16 lag_id)
+{
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+        mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
+                                         lag_id);
+        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
+                                        u16 lag_id)
+{
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+        mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
+                                        lag_id);
+        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
+                                         u16 lag_id)
+{
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+        mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
+                                         lag_id);
+        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
+                                  struct net_device *lag_dev,
+                                  u16 *p_lag_id)
+{
+        struct mlxsw_sp_upper *lag;
+        int free_lag_id = -1;
+        int i;
+
+        for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
+                lag = mlxsw_sp_lag_get(mlxsw_sp, i);
+                if (lag->ref_count) {
+                        if (lag->dev == lag_dev) {
+                                *p_lag_id = i;
+                                return 0;
+                        }
+                } else if (free_lag_id < 0) {
+                        free_lag_id = i;
+                }
+        }
+        if (free_lag_id < 0)
+                return -EBUSY;
+        *p_lag_id = free_lag_id;
+        return 0;
+}
+
+static bool
+mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
+                          struct net_device *lag_dev,
+                          struct netdev_lag_upper_info *lag_upper_info)
+{
+        u16 lag_id;
+
+        if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
+                return false;
+        if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+                return false;
+        return true;
+}
+
+static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
+                                       u16 lag_id, u8 *p_port_index)
+{
+        int i;
+
+        for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+                if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
+                        *p_port_index = i;
+                        return 0;
+                }
+        }
+        return -EBUSY;
+}
+
+static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  struct net_device *lag_dev)
+{
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        struct mlxsw_sp_upper *lag;
+        u16 lag_id;
+        u8 port_index;
+        int err;
+
+        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
+        if (err)
+                return err;
+        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
+        if (!lag->ref_count) {
+                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
+                if (err)
+                        return err;
+                lag->dev = lag_dev;
+        }
+
+        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
+        if (err)
+                return err;
+        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
+        if (err)
+                goto err_col_port_add;
+        err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
+        if (err)
+                goto err_col_port_enable;
+
+        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
+                                   mlxsw_sp_port->local_port);
+        mlxsw_sp_port->lag_id = lag_id;
+        mlxsw_sp_port->lagged = 1;
+        lag->ref_count++;
+        return 0;
+
+err_col_port_enable:
+        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+err_col_port_add:
+        if (!lag->ref_count)
+                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+        return err;
+}
+
+static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+                                   struct net_device *lag_dev)
+{
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        struct mlxsw_sp_upper *lag;
+        u16 lag_id = mlxsw_sp_port->lag_id;
+        int err;
+
+        if (!mlxsw_sp_port->lagged)
+                return 0;
+        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
+        WARN_ON(lag->ref_count == 0);
+
+        err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
+        if (err)
+                return err;
+        err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+        if (err)
+                return err;
+
+        if (lag->ref_count == 1) {
+                err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+                if (err)
+                        return err;
+        }
+
+        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
+                                     mlxsw_sp_port->local_port);
+        mlxsw_sp_port->lagged = 0;
+        lag->ref_count--;
+        return 0;
+}
+
+static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
+                                         unsigned long event, void *ptr)
 {
-        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
         struct netdev_notifier_changeupper_info *info;
         struct mlxsw_sp_port *mlxsw_sp_port;
         struct net_device *upper_dev;
         struct mlxsw_sp *mlxsw_sp;
         int err;
 
-        if (!mlxsw_sp_port_dev_check(dev))
-                return NOTIFY_DONE;
-
         mlxsw_sp_port = netdev_priv(dev);
         mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
         info = ptr;
 
         switch (event) {
         case NETDEV_PRECHANGEUPPER:
                 upper_dev = info->upper_dev;
+                if (!info->master || !info->linking)
+                        break;
                 /* HW limitation forbids to put ports to multiple bridges. */
-                if (info->master && info->linking &&
-                    netif_is_bridge_master(upper_dev) &&
+                if (netif_is_bridge_master(upper_dev) &&
                     !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
                         return NOTIFY_BAD;
+                if (netif_is_lag_master(upper_dev) &&
+                    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
+                                               info->upper_info))
+                        return NOTIFY_BAD;
                 break;
         case NETDEV_CHANGEUPPER:
                 upper_dev = info->upper_dev;
-                if (info->master &&
-                    netif_is_bridge_master(upper_dev)) {
+                if (!info->master)
+                        break;
+                if (netif_is_bridge_master(upper_dev)) {
                         if (info->linking) {
                                 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
                                 if (err)
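The helpers mlxsw_sp_lag_get() and mlxsw_sp_port_lagged_get(), and the struct mlxsw_sp_upper type used by the new LAG code above, are likewise expected to come from the companion header. A sketch of what the call sites imply (the dev and ref_count field names appear in the diff; the array-backed lookup and the return type of mlxsw_sp_port_lagged_get() are assumptions):

struct mlxsw_sp_upper {
        struct net_device *dev;
        unsigned int ref_count;
};

/* Assumed: one mlxsw_sp_upper slot per LAG ID, indexed directly. */
static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
        return &mlxsw_sp->lags[lag_id];
}

/* Assumed: returns the port occupying (lag_id, port_index), or NULL if the
 * slot is free, which is how mlxsw_sp_port_lag_index_get() uses it. */
struct mlxsw_sp_port *mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp,
                                               u16 lag_id, u8 port_index);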
@@ -1937,13 +2153,61 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
                                 mlxsw_sp_port->bridged = 0;
                                 mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
                         }
+                } else if (netif_is_lag_master(upper_dev)) {
+                        if (info->linking) {
+                                err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
+                                                             upper_dev);
+                                if (err) {
+                                        netdev_err(dev, "Failed to join link aggregation\n");
+                                        return NOTIFY_BAD;
+                                }
+                        } else {
+                                err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
+                                                              upper_dev);
+                                if (err) {
+                                        netdev_err(dev, "Failed to leave link aggregation\n");
+                                        return NOTIFY_BAD;
+                                }
+                        }
                 }
                 break;
         }
 
         return NOTIFY_DONE;
 }
 
+static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
+                                        unsigned long event, void *ptr)
+{
+        struct net_device *dev;
+        struct list_head *iter;
+        int ret;
+
+        netdev_for_each_lower_dev(lag_dev, dev, iter) {
+                if (mlxsw_sp_port_dev_check(dev)) {
+                        ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+                        if (ret == NOTIFY_BAD)
+                                return ret;
+                }
+        }
+
+        return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+                                    unsigned long event, void *ptr)
+{
+        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+        if (mlxsw_sp_port_dev_check(dev))
+                return mlxsw_sp_netdevice_port_event(dev, event, ptr);
+
+        if (netif_is_lag_master(dev))
+                return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+
+        return NOTIFY_DONE;
+}
+
 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
         .notifier_call = mlxsw_sp_netdevice_event,
 };
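For reference, the core-side LAG mapping calls made in mlxsw_sp_port_lag_join()/mlxsw_sp_port_lag_leave() have the following shape, inferred purely from the call sites in this diff (return types and exact parameter types are assumptions; the authoritative declarations live outside this file):

/* Inferred prototypes; see the mlxsw_core_lag_mapping_set()/_clear() call sites above. */
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
                                u16 lag_id, u8 port_index, u8 local_port);
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
                                  u16 lag_id, u8 local_port);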
