@@ -111,6 +111,7 @@
 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
 #define MVNETA_CPU_RXQ_ACCESS(rxq)	BIT(rxq)
+#define MVNETA_CPU_TXQ_ACCESS(txq)	BIT((txq) + 8)
 #define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))
 
 /* Exception Interrupt Port/Queue Cause register
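As a minimal standalone illustration (not part of the patch) of how these masks compose: RX-queue grants occupy bits [7:0] of a CPU's MVNETA_CPU_MAP value and the new TX-queue grants occupy bits [15:8]. BIT() is redefined locally to mirror the kernel macro:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1u << (n))	/* local stand-in for the kernel macro */
#define MVNETA_CPU_RXQ_ACCESS(rxq)	BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq)	BIT((txq) + 8)

int main(void)
{
	/* Grant a CPU access to RX queue 0 and TX queue 3. */
	uint32_t map = MVNETA_CPU_RXQ_ACCESS(0) | MVNETA_CPU_TXQ_ACCESS(3);

	printf("0x%04x\n", map);	/* prints 0x0801 */
	return 0;
}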
@@ -514,6 +515,9 @@ struct mvneta_tx_queue {
 
 	/* DMA address of TSO headers */
 	dma_addr_t tso_hdrs_phys;
+
+	/* Affinity mask for CPUs */
+	cpumask_t affinity_mask;
 };
 
 struct mvneta_rx_queue {
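The new field relies on the standard kernel cpumask API. A kernel-context fragment (illustrative only, not standalone-compilable) of the helpers that later populate it from mvneta_txq_init():

	cpumask_t mask;

	cpumask_clear(&mask);		/* start from an empty mask */
	cpumask_set_cpu(1, &mask);	/* grant a single CPU; CPU 1 is an arbitrary example */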
@@ -1062,20 +1066,30 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 	/* Enable MBUS Retry bit16 */
 	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
 
-	/* Set CPU queue access map. CPUs are assigned to the RX
-	 * queues modulo their number and all the TX queues are
-	 * assigned to the CPU associated to the default RX queue.
+	/* Set CPU queue access map. CPUs are assigned to the RX and
+	 * TX queues modulo their number. If there is only one TX
+	 * queue then it is assigned to the CPU associated to the
+	 * default RX queue.
 	 */
 	for_each_present_cpu(cpu) {
 		int rxq_map = 0, txq_map = 0;
-		int rxq;
+		int rxq, txq;
 
 		for (rxq = 0; rxq < rxq_number; rxq++)
 			if ((rxq % max_cpu) == cpu)
 				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
 
-		if (cpu == pp->rxq_def)
-			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+		for (txq = 0; txq < txq_number; txq++)
+			if ((txq % max_cpu) == cpu)
+				txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
+
+		/* With only one TX queue we configure a special case
+		 * that allows all the irqs to be handled on a single
+		 * CPU.
+		 */
+		if (txq_number == 1)
+			txq_map = (cpu == pp->rxq_def) ?
+				MVNETA_CPU_TXQ_ACCESS(1) : 0;
 
 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
 	}
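To make the modulo distribution concrete, a standalone sketch (CPU and queue counts are made-up example values, not taken from the driver): with 2 CPUs and 8 queues of each kind, CPU 0 is granted the even-numbered queues and CPU 1 the odd-numbered ones.

#include <stdio.h>

int main(void)
{
	const int max_cpu = 2, rxq_number = 8, txq_number = 8;

	for (int cpu = 0; cpu < max_cpu; cpu++) {
		unsigned int rxq_map = 0, txq_map = 0;
		int q;

		for (q = 0; q < rxq_number; q++)
			if ((q % max_cpu) == cpu)
				rxq_map |= 1u << q;		/* MVNETA_CPU_RXQ_ACCESS(q) */
		for (q = 0; q < txq_number; q++)
			if ((q % max_cpu) == cpu)
				txq_map |= 1u << (q + 8);	/* MVNETA_CPU_TXQ_ACCESS(q) */

		/* Prints cpu0: map = 0x5555 and cpu1: map = 0xaaaa */
		printf("cpu%d: map = 0x%04x\n", cpu, rxq_map | txq_map);
	}
	return 0;
}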
@@ -2362,6 +2376,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
 static int mvneta_txq_init(struct mvneta_port *pp,
 			   struct mvneta_tx_queue *txq)
 {
+	int cpu;
+
 	txq->size = pp->tx_ring_size;
 
 	/* A queue must always have room for at least one skb.
@@ -2414,6 +2430,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	}
 	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
+	/* Setup XPS mapping */
+	if (txq_number > 1)
+		cpu = txq->id % num_present_cpus();
+	else
+		cpu = pp->rxq_def % num_present_cpus();
+	cpumask_set_cpu(cpu, &txq->affinity_mask);
+	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
+
 	return 0;
 }
 
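The CPU feeding each queue's XPS mask can be summarized by a hypothetical helper (name and signature are mine, not the driver's): with several TX queues, the queues are spread round-robin over the present CPUs; with a single queue, the mask follows the CPU bound to the default RX queue.

	/* Kernel-context fragment, illustrative only. */
	static int mvneta_xps_cpu(int txq_id, int txq_number, int rxq_def)
	{
		return (txq_number > 1 ? txq_id : rxq_def) % num_present_cpus();
	}

Once netif_set_xps_queue() has run, the resulting mapping can be inspected from userspace via /sys/class/net/<iface>/queues/tx-<n>/xps_cpus.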
@@ -2836,13 +2860,23 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
 			if ((rxq % max_cpu) == cpu)
 				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
 
-		if (i == online_cpu_idx) {
-			/* Map the default receive queue and transmit
-			 * queue to the elected CPU
+		if (i == online_cpu_idx)
+			/* Map the default receive queue to the
+			 * elected CPU
 			 */
 			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
-			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
-		}
+
+		/* We update the TX queue map only if we have one
+		 * queue. In this case we associate the TX queue to
+		 * the CPU bound to the default RX queue.
+		 */
+		if (txq_number == 1)
+			txq_map = (i == online_cpu_idx) ?
+				MVNETA_CPU_TXQ_ACCESS(1) : 0;
+		else
+			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
+				  MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+
 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
 
 		/* Update the interrupt mask on each CPU according the
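To illustrate the read-modify-write in the multi-queue branch above (register value is made up): masking the current MVNETA_CPU_MAP value with MVNETA_CPU_TXQ_ACCESS_ALL_MASK keeps the TX grants in bits [15:8] and discards the stale RX bits, which are then rebuilt from the modulo rule plus the elected default queue.

	/* Illustrative only: suppose MVNETA_CPU_MAP(cpu) currently reads 0xaa02. */
	u32 txq_map = 0xaa02 & MVNETA_CPU_TXQ_ACCESS_ALL_MASK;	/* == 0xaa00 */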