
Commit 50bf8cb

gclement authored and davem330 committed
net: mvneta: Configure XPS support
With this patch each CPU is associated with its own set of TX queues. The patch also sets up XPS with an initial configuration in which the affinity matches the hardware configuration.

Suggested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 9a401de commit 50bf8cb
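
The policy the commit message describes is a plain modulo distribution: TX queue n is serviced by CPU (n % nr_cpus). A minimal user-space sketch of that rule, assuming a hypothetical 2-CPU, 8-queue configuration (the driver itself uses txq_number and num_present_cpus() at runtime):

#include <stdio.h>

int main(void)
{
	const int nr_cpus = 2, nr_txqs = 8;
	int txq;

	/* Same modulo rule as the patch: queue n -> CPU (n % nr_cpus). */
	for (txq = 0; txq < nr_txqs; txq++)
		printf("txq %d -> cpu %d\n", txq, txq % nr_cpus);
	return 0;
}

With two CPUs this pins the even-numbered queues to CPU 0 and the odd-numbered ones to CPU 1.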

File tree

1 file changed (+45, -11 lines)

drivers/net/ethernet/marvell/mvneta.c

Lines changed: 45 additions & 11 deletions
@@ -111,6 +111,7 @@
 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
 #define MVNETA_CPU_RXQ_ACCESS(rxq)	BIT(rxq)
+#define MVNETA_CPU_TXQ_ACCESS(txq)	BIT(txq + 8)
 #define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))
 
 /* Exception Interrupt Port/Queue Cause register
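
The new MVNETA_CPU_TXQ_ACCESS(txq) macro completes the per-CPU map register layout: RX queue enable bits sit in [7:0] (mask 0x000000ff) and TX queue enable bits in [15:8] (mask 0x0000ff00). A standalone sketch of the bit arithmetic, reusing the macros from the diff:

#include <stdio.h>

#define BIT(n)				(1u << (n))
#define MVNETA_CPU_RXQ_ACCESS(rxq)	BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq)	BIT((txq) + 8)

int main(void)
{
	/* A CPU granted RX queue 2 and TX queue 3 gets map 0x0804. */
	unsigned int map = MVNETA_CPU_RXQ_ACCESS(2) | MVNETA_CPU_TXQ_ACCESS(3);

	printf("cpu map = 0x%04x\n", map);
	return 0;
}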
@@ -514,6 +515,9 @@ struct mvneta_tx_queue {
 
 	/* DMA address of TSO headers */
 	dma_addr_t tso_hdrs_phys;
+
+	/* Affinity mask for CPUs*/
+	cpumask_t affinity_mask;
 };
 
 struct mvneta_rx_queue {
@@ -1062,20 +1066,30 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 	/* Enable MBUS Retry bit16 */
 	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
 
-	/* Set CPU queue access map. CPUs are assigned to the RX
-	 * queues modulo their number and all the TX queues are
-	 * assigned to the CPU associated to the default RX queue.
+	/* Set CPU queue access map. CPUs are assigned to the RX and
+	 * TX queues modulo their number. If there is only one TX
+	 * queue then it is assigned to the CPU associated to the
+	 * default RX queue.
 	 */
 	for_each_present_cpu(cpu) {
 		int rxq_map = 0, txq_map = 0;
-		int rxq;
+		int rxq, txq;
 
 		for (rxq = 0; rxq < rxq_number; rxq++)
 			if ((rxq % max_cpu) == cpu)
 				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
 
-		if (cpu == pp->rxq_def)
-			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+		for (txq = 0; txq < txq_number; txq++)
+			if ((txq % max_cpu) == cpu)
+				txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
+
+		/* With only one TX queue we configure a special case
+		 * which will allow to get all the irq on a single
+		 * CPU
+		 */
+		if (txq_number == 1)
+			txq_map = (cpu == pp->rxq_def) ?
+				MVNETA_CPU_TXQ_ACCESS(1) : 0;
 
 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
 	}
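
For a hypothetical 2-CPU system with 8 RX and 8 TX queues, the loop above interleaves the queues between the CPUs. A user-space rendering of the same loop, assuming those counts and the multi-queue (txq_number > 1) path:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	const int max_cpu = 2, rxq_number = 8, txq_number = 8;
	int cpu, rxq, txq;

	for (cpu = 0; cpu < max_cpu; cpu++) {
		unsigned int rxq_map = 0, txq_map = 0;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= BIT(rxq);	/* MVNETA_CPU_RXQ_ACCESS */
		for (txq = 0; txq < txq_number; txq++)
			if ((txq % max_cpu) == cpu)
				txq_map |= BIT(txq + 8);	/* MVNETA_CPU_TXQ_ACCESS */

		printf("cpu %d map = 0x%04x\n", cpu, rxq_map | txq_map);
	}
	return 0;
}

This prints 0x5555 for CPU 0 and 0xaaaa for CPU 1: each CPU owns every other queue in both directions.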
@@ -2362,6 +2376,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
 static int mvneta_txq_init(struct mvneta_port *pp,
 			   struct mvneta_tx_queue *txq)
 {
+	int cpu;
+
 	txq->size = pp->tx_ring_size;
 
 	/* A queue must always have room for at least one skb.
@@ -2414,6 +2430,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	}
 	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
+	/* Setup XPS mapping */
+	if (txq_number > 1)
+		cpu = txq->id % num_present_cpus();
+	else
+		cpu = pp->rxq_def % num_present_cpus();
+	cpumask_set_cpu(cpu, &txq->affinity_mask);
+	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
+
 	return 0;
 }
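
netif_set_xps_queue() is the stock kernel helper behind the new block: it records which CPUs should transmit on a given queue, the same mapping otherwise reachable through /sys/class/net/<dev>/queues/tx-<n>/xps_cpus. A kernel-context sketch (not compilable stand-alone; dev and nr_txq are hypothetical parameters, not driver API) of seeding the same one-CPU-per-queue policy:

/* Sketch: pin each TX queue's XPS map to a single CPU, mirroring
 * the patch. An on-stack cpumask_t is fine for a sketch; drivers
 * may prefer cpumask_var_t on large-NR_CPUS kernels.
 */
static void seed_xps(struct net_device *dev, int nr_txq)
{
	int q;

	for (q = 0; q < nr_txq; q++) {
		cpumask_t mask;

		cpumask_clear(&mask);
		cpumask_set_cpu(q % num_present_cpus(), &mask);
		netif_set_xps_queue(dev, &mask, q);
	}
}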

@@ -2836,13 +2860,23 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
 			if ((rxq % max_cpu) == cpu)
 				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
 
-		if (i == online_cpu_idx) {
-			/* Map the default receive queue and transmit
-			 * queue to the elected CPU
+		if (i == online_cpu_idx)
+			/* Map the default receive queue queue to the
+			 * elected CPU
 			 */
 			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
-			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
-		}
+
+		/* We update the TX queue map only if we have one
+		 * queue. In this case we associate the TX queue to
+		 * the CPU bound to the default RX queue
+		 */
+		if (txq_number == 1)
+			txq_map = (i == online_cpu_idx) ?
+				MVNETA_CPU_TXQ_ACCESS(1) : 0;
+		else
+			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
+				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+
 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
 
 		/* Update the interrupt mask on each CPU according the
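
In the multi-queue case the election path now preserves the TX half of the register instead of rewriting it: mvreg_read() fetches the current map and the 0x0000ff00 mask keeps only the TX bits, so the static assignment made in mvneta_defaults_set() survives an RX-side re-election. The masking step in isolation, with hypothetical register values:

#include <stdio.h>

#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00

int main(void)
{
	unsigned int cur = 0x5555;	/* hypothetical current map */
	unsigned int rxq_map = 0x0001;	/* freshly elected RX map */

	/* Keep the TX half untouched, rebuild only the RX half. */
	unsigned int txq_map = cur & MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

	printf("new map = 0x%04x\n", rxq_map | txq_map);	/* 0x5501 */
	return 0;
}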
