Skip to content

Commit 9a401de

Browse files
gclement authored and davem330 committed
net: mvneta: Add naive RSS support
This patch adds the support for the RSS related ethtool function. Currently it only uses one entry in the indirection table which allows associating an mvneta interface to a given CPU. Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> Tested-by: Marcin Wojtas <mw@semihalf.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 2dcf75e commit 9a401de

File tree

1 file changed

+126
-1
lines changed

1 file changed

+126
-1
lines changed

drivers/net/ethernet/marvell/mvneta.c

Lines changed: 126 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -261,6 +261,11 @@
261261

262262
#define MVNETA_TX_MTU_MAX 0x3ffff
263263

264+
/* The RSS lookup table actually has 256 entries but we do not use
265+
* them yet
266+
*/
267+
#define MVNETA_RSS_LU_TABLE_SIZE 1
268+
264269
/* TSO header size */
265270
#define TSO_HEADER_SIZE 128
266271

@@ -382,6 +387,8 @@ struct mvneta_port {
382387
unsigned int use_inband_status:1;
383388

384389
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
390+
391+
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
385392
};
386393

387394
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -1067,7 +1074,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
10671074
if ((rxq % max_cpu) == cpu)
10681075
rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
10691076

1070-
if (cpu == rxq_def)
1077+
if (cpu == pp->rxq_def)
10711078
txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
10721079

10731080
mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
@@ -2508,6 +2515,18 @@ static void mvneta_percpu_unmask_interrupt(void *arg)
25082515
MVNETA_MISCINTR_INTR_MASK);
25092516
}
25102517

2518+
static void mvneta_percpu_mask_interrupt(void *arg)
2519+
{
2520+
struct mvneta_port *pp = arg;
2521+
2522+
/* All the queue are masked, but actually only the ones
2523+
* maped to this CPU will be masked
2524+
*/
2525+
mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2526+
mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2527+
mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2528+
}
2529+
25112530
static void mvneta_start_dev(struct mvneta_port *pp)
25122531
{
25132532
unsigned int cpu;
@@ -3231,6 +3250,106 @@ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
32313250
return -EOPNOTSUPP;
32323251
}
32333252

3253+
static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
3254+
{
3255+
return MVNETA_RSS_LU_TABLE_SIZE;
3256+
}
3257+
3258+
static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
3259+
struct ethtool_rxnfc *info,
3260+
u32 *rules __always_unused)
3261+
{
3262+
switch (info->cmd) {
3263+
case ETHTOOL_GRXRINGS:
3264+
info->data = rxq_number;
3265+
return 0;
3266+
case ETHTOOL_GRXFH:
3267+
return -EOPNOTSUPP;
3268+
default:
3269+
return -EOPNOTSUPP;
3270+
}
3271+
}
3272+
3273+
static int mvneta_config_rss(struct mvneta_port *pp)
3274+
{
3275+
int cpu;
3276+
u32 val;
3277+
3278+
netif_tx_stop_all_queues(pp->dev);
3279+
3280+
for_each_online_cpu(cpu)
3281+
smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
3282+
pp, true);
3283+
3284+
/* We have to synchronise on the napi of each CPU */
3285+
for_each_online_cpu(cpu) {
3286+
struct mvneta_pcpu_port *pcpu_port =
3287+
per_cpu_ptr(pp->ports, cpu);
3288+
3289+
napi_synchronize(&pcpu_port->napi);
3290+
napi_disable(&pcpu_port->napi);
3291+
}
3292+
3293+
pp->rxq_def = pp->indir[0];
3294+
3295+
/* Update unicast mapping */
3296+
mvneta_set_rx_mode(pp->dev);
3297+
3298+
/* Update val of portCfg register accordingly with all RxQueue types */
3299+
val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
3300+
mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3301+
3302+
/* Update the elected CPU matching the new rxq_def */
3303+
mvneta_percpu_elect(pp);
3304+
3305+
/* We have to synchronise on the napi of each CPU */
3306+
for_each_online_cpu(cpu) {
3307+
struct mvneta_pcpu_port *pcpu_port =
3308+
per_cpu_ptr(pp->ports, cpu);
3309+
3310+
napi_enable(&pcpu_port->napi);
3311+
}
3312+
3313+
netif_tx_start_all_queues(pp->dev);
3314+
3315+
return 0;
3316+
}
3317+
3318+
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
3319+
const u8 *key, const u8 hfunc)
3320+
{
3321+
struct mvneta_port *pp = netdev_priv(dev);
3322+
/* We require at least one supported parameter to be changed
3323+
* and no change in any of the unsupported parameters
3324+
*/
3325+
if (key ||
3326+
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
3327+
return -EOPNOTSUPP;
3328+
3329+
if (!indir)
3330+
return 0;
3331+
3332+
memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
3333+
3334+
return mvneta_config_rss(pp);
3335+
}
3336+
3337+
/* ethtool ->get_rxfh(): report the RSS hash function and indirection
 * table.
 *
 * The hardware hash key is not exposed, so @key is left untouched;
 * @hfunc (if provided) is always ETH_RSS_HASH_TOP.  A NULL @indir means
 * the caller does not want the table.
 */
static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	/* memcpy takes a byte count: copy whole u32 entries, not just
	 * MVNETA_RSS_LU_TABLE_SIZE bytes (the bare count only worked by
	 * accident for a one-entry table on little-endian).
	 */
	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE * sizeof(u32));

	return 0;
}
3352+
32343353
static const struct net_device_ops mvneta_netdev_ops = {
32353354
.ndo_open = mvneta_open,
32363355
.ndo_stop = mvneta_stop,
@@ -3255,6 +3374,10 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
32553374
.get_strings = mvneta_ethtool_get_strings,
32563375
.get_ethtool_stats = mvneta_ethtool_get_stats,
32573376
.get_sset_count = mvneta_ethtool_get_sset_count,
3377+
.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
3378+
.get_rxnfc = mvneta_ethtool_get_rxnfc,
3379+
.get_rxfh = mvneta_ethtool_get_rxfh,
3380+
.set_rxfh = mvneta_ethtool_set_rxfh,
32583381
};
32593382

32603383
/* Initialize hw */
@@ -3446,6 +3569,8 @@ static int mvneta_probe(struct platform_device *pdev)
34463569

34473570
pp->rxq_def = rxq_def;
34483571

3572+
pp->indir[0] = rxq_def;
3573+
34493574
pp->clk = devm_clk_get(&pdev->dev, NULL);
34503575
if (IS_ERR(pp->clk)) {
34513576
err = PTR_ERR(pp->clk);

0 commit comments

Comments
 (0)