@@ -138,6 +138,64 @@ static void stmmac_verify_args(void)
 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 }
 
+/**
+ * stmmac_disable_all_queues - Disable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+	u32 queue;
+
+	for (queue = 0; queue < rx_queues_cnt; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		napi_disable(&rx_q->napi);
+	}
+}
+
+/**
+ * stmmac_enable_all_queues - Enable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+{
+	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+	u32 queue;
+
+	for (queue = 0; queue < rx_queues_cnt; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		napi_enable(&rx_q->napi);
+	}
+}
+
+/**
+ * stmmac_stop_all_queues - Stop all queues
+ * @priv: driver private structure
+ */
+static void stmmac_stop_all_queues(struct stmmac_priv *priv)
+{
+	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+	u32 queue;
+
+	for (queue = 0; queue < tx_queues_cnt; queue++)
+		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
+/**
+ * stmmac_start_all_queues - Start all queues
+ * @priv: driver private structure
+ */
+static void stmmac_start_all_queues(struct stmmac_priv *priv)
+{
+	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+	u32 queue;
+
+	for (queue = 0; queue < tx_queues_cnt; queue++)
+		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
 /**
  * stmmac_clk_csr_set - dynamically set the MDC clock
  * @priv: driver private structure
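
Note: NAPI state lives per RX queue, so the enable/disable helpers above walk the RX queues, while start/stop act on the stack-visible TX queues through the per-queue netdev API. The sketch below is a hypothetical illustration of that API (the helper name is made up, not stmmac code); the per-queue calls replace netif_stop_queue()/netif_wake_queue(), which only ever operate on TX queue 0 of a multi-queue device.

/* Hypothetical illustration of the per-queue netdev API used above.
 * netdev_get_tx_queue() returns the struct netdev_queue for one index;
 * netif_tx_{stop,start,wake}_queue() act on that single queue only.
 */
static void example_pause_one_txq(struct net_device *dev, u32 queue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	netif_tx_stop_queue(txq);	/* stack stops submitting skbs here */
	/* ... reclaim descriptors, reprogram the DMA channel, etc. ... */
	netif_tx_wake_queue(txq);	/* resume and kick the qdisc layer */
}
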
@@ -1262,7 +1320,6 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 
 	for (i = 0; i < DMA_TX_SIZE; i++) {
 		struct dma_desc *p;
-
 		if (priv->extend_desc)
 			p = &((tx_q->dma_etx + i)->basic);
 		else
@@ -1286,9 +1343,9 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 
 		tx_q->dirty_tx = 0;
 		tx_q->cur_tx = 0;
-	}
 
-	netdev_reset_queue(priv->dev);
+		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+	}
 
 	return 0;
 }
@@ -1805,13 +1862,16 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 	}
 	tx_q->dirty_tx = entry;
 
-	netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
+	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
+				  pkts_compl, bytes_compl);
+
+	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
+								queue))) &&
+	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
 
-	if (unlikely(netif_queue_stopped(priv->dev) &&
-	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH)) {
 		netif_dbg(priv, tx_done, priv->dev,
 			  "%s: restart transmit\n", __func__);
-		netif_wake_queue(priv->dev);
+		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
 	}
 
 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
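
Note: netdev_tx_sent_queue(), netdev_tx_completed_queue() and netdev_tx_reset_queue() are the per-queue byte-queue-limit (BQL) hooks; the netdev_sent_queue()/netdev_completed_queue() variants they replace only account against queue 0. Below is a hedged sketch of the completion-side pairing (the helper name is hypothetical, not the driver's code): every byte reported at xmit time must eventually be reported back on completion, and the accounting is cleared when the ring is reinitialised.

/* Hypothetical BQL usage for one TX queue. */
static void example_tx_complete(struct net_device *dev, u32 queue,
				unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	/* report reclaimed work; pairs with netdev_tx_sent_queue() */
	netdev_tx_completed_queue(txq, pkts, bytes);

	if (netif_tx_queue_stopped(txq) /* && ring has room again */)
		netif_tx_wake_queue(txq);
}
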
@@ -1843,7 +1903,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
 	int i;
 
-	netif_stop_queue(priv->dev);
+	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
 
 	stmmac_stop_tx_dma(priv, chan);
 	dma_free_tx_skbufs(priv, chan);
@@ -1858,11 +1918,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
 						     (i == DMA_TX_SIZE - 1));
 	tx_q->dirty_tx = 0;
 	tx_q->cur_tx = 0;
-	netdev_reset_queue(priv->dev);
+	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
 	stmmac_start_tx_dma(priv, chan);
 
 	priv->dev->stats.tx_errors++;
-	netif_wake_queue(priv->dev);
+	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
 }
 
 /**
@@ -1907,12 +1967,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 	u32 chan;
 
 	for (chan = 0; chan < tx_channel_count; chan++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+
 		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
 						      &priv->xstats, chan);
 		if (likely((status & handle_rx)) || (status & handle_tx)) {
-			if (likely(napi_schedule_prep(&priv->napi))) {
+			if (likely(napi_schedule_prep(&rx_q->napi))) {
 				stmmac_disable_dma_irq(priv, chan);
-				__napi_schedule(&priv->napi);
+				__napi_schedule(&rx_q->napi);
 			}
 		}
 
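
Note: with one napi_struct embedded in each RX queue, the DMA interrupt only schedules the instance that owns the channel, so the other queues keep polling undisturbed. A stripped-down, hypothetical version of that scheduling pattern (not the driver's exact code):

/* Hypothetical per-channel NAPI scheduling: mask the channel's DMA
 * interrupt, then hand the work to that channel's own NAPI context;
 * the poll callback re-enables the interrupt when it completes.
 */
static void example_handle_chan_irq(struct napi_struct *chan_napi)
{
	if (likely(napi_schedule_prep(chan_napi))) {
		/* disable_dma_irq(chan); -- device-specific masking */
		__napi_schedule(chan_napi);
	}
}
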
@@ -2554,8 +2616,8 @@ static int stmmac_open(struct net_device *dev)
 		}
 	}
 
-	napi_enable(&priv->napi);
-	netif_start_queue(dev);
+	stmmac_enable_all_queues(priv);
+	stmmac_start_all_queues(priv);
 
 	return 0;
 
@@ -2598,9 +2660,9 @@ static int stmmac_release(struct net_device *dev)
 		phy_disconnect(dev->phydev);
 	}
 
-	netif_stop_queue(dev);
+	stmmac_stop_all_queues(priv);
 
-	napi_disable(&priv->napi);
+	stmmac_disable_all_queues(priv);
 
 	del_timer_sync(&priv->txtimer);
 
@@ -2717,8 +2779,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Desc availability based on threshold should be enough safe */
 	if (unlikely(stmmac_tx_avail(priv, queue) <
 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
+		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+								queue));
 			/* This is a hard error, log it. */
 			netdev_err(priv->dev,
 				   "%s: Tx Ring full when queue awake\n",
@@ -2798,7 +2861,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
 			  __func__);
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
 	}
 
 	dev->stats.tx_bytes += skb->len;
@@ -2855,7 +2918,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 		print_pkt(skb->data, skb_headlen(skb));
 	}
 
-	netdev_sent_queue(dev, skb->len);
+	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
 	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
 				       queue);
@@ -2899,8 +2962,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
+		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+								queue));
 			/* This is a hard error, log it. */
 			netdev_err(priv->dev,
 				   "%s: Tx Ring full when queue awake\n",
@@ -2998,7 +3062,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
 			  __func__);
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
 	}
 
 	dev->stats.tx_bytes += skb->len;
@@ -3061,7 +3125,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		dma_wmb();
 	}
 
-	netdev_sent_queue(dev, skb->len);
+	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
 	if (priv->synopsys_id < DWMAC_CORE_4_00)
 		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
@@ -3361,7 +3425,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		else
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-		napi_gro_receive(&priv->napi, skb);
+		napi_gro_receive(&rx_q->napi, skb);
 
 		priv->dev->stats.rx_packets++;
 		priv->dev->stats.rx_bytes += frame_len;
@@ -3386,21 +3450,21 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
  */
 static int stmmac_poll(struct napi_struct *napi, int budget)
 {
-	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+	struct stmmac_rx_queue *rx_q =
+		container_of(napi, struct stmmac_rx_queue, napi);
+	struct stmmac_priv *priv = rx_q->priv_data;
 	u32 tx_count = priv->plat->tx_queues_to_use;
-	u32 chan = STMMAC_CHAN0;
+	u32 chan = rx_q->queue_index;
 	int work_done = 0;
-	u32 queue = chan;
+	u32 queue;
 
 	priv->xstats.napi_poll++;
 
 	/* check all the queues */
 	for (queue = 0; queue < tx_count; queue++)
 		stmmac_tx_clean(priv, queue);
 
-	queue = chan;
-
-	work_done = stmmac_rx(priv, budget, queue);
+	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
 		stmmac_enable_dma_irq(priv, chan);
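
Note: the poll callback now recovers its queue with container_of(), which only works because the napi_struct is embedded in the per-queue structure rather than referenced through a pointer. A generic, hypothetical sketch of that layout (the example_* names are made up, not the driver's types):

/* Hypothetical per-queue NAPI layout: embed the napi_struct in the
 * per-queue state and recover it in the poll callback.
 */
struct example_rx_queue {
	struct napi_struct napi;	/* must be embedded, not a pointer */
	void *priv_data;		/* back-pointer to the driver priv */
	u32 queue_index;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_rx_queue *rx_q =
		container_of(napi, struct example_rx_queue, napi);
	int work_done = 0;

	/* work_done = clean_rx_ring(rx_q, budget); -- device specific */
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}
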
@@ -3989,11 +4053,14 @@ int stmmac_dvr_probe(struct device *device,
 		     struct plat_stmmacenet_data *plat_dat,
 		     struct stmmac_resources *res)
 {
-	int ret = 0;
 	struct net_device *ndev = NULL;
 	struct stmmac_priv *priv;
+	int ret = 0;
+	u32 queue;
 
-	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
+				  MTL_MAX_TX_QUEUES,
+				  MTL_MAX_RX_QUEUES);
 	if (!ndev)
 		return -ENOMEM;
 
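
Note: alloc_etherdev_mqs() sizes the netdev for the hardware maximum number of TX/RX queues up front; the count actually used is reported separately once the platform configuration is known (the next hunk writes real_num_rx_queues/real_num_tx_queues directly). Below is a hedged sketch of the same pattern; MY_MAX_* and example_alloc() are placeholders, and it uses the netif_set_real_num_*_queues() helpers as an alternative to the direct assignment:

/* Hypothetical multi-queue netdev allocation. */
#define MY_MAX_TX_QUEUES	8	/* placeholder hardware limits */
#define MY_MAX_RX_QUEUES	8

static struct net_device *example_alloc(size_t priv_size,
					 unsigned int txqs, unsigned int rxqs)
{
	struct net_device *ndev;

	/* reserve room for the maximum at allocation time */
	ndev = alloc_etherdev_mqs(priv_size, MY_MAX_TX_QUEUES,
				  MY_MAX_RX_QUEUES);
	if (!ndev)
		return NULL;

	/* then report how many queues are really in use */
	if (netif_set_real_num_tx_queues(ndev, txqs) ||
	    netif_set_real_num_rx_queues(ndev, rxqs)) {
		free_netdev(ndev);
		return NULL;
	}
	return ndev;
}
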
@@ -4035,6 +4102,10 @@ int stmmac_dvr_probe(struct device *device,
 	if (ret)
 		goto error_hw_init;
 
+	/* Configure real RX and TX queues */
+	ndev->real_num_rx_queues = priv->plat->rx_queues_to_use;
+	ndev->real_num_tx_queues = priv->plat->tx_queues_to_use;
+
 	ndev->netdev_ops = &stmmac_netdev_ops;
 
 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -4084,7 +4155,12 @@ int stmmac_dvr_probe(struct device *device,
 			 "Enable RX Mitigation via HW Watchdog Timer\n");
 	}
 
-	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
+	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
+			       (8 * priv->plat->rx_queues_to_use));
+	}
 
 	spin_lock_init(&priv->lock);
 
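
Note: each RX queue now registers its own NAPI instance, and every netif_napi_add() needs a matching netif_napi_del() on the teardown/error path, as the next hunk does. A hypothetical registration/teardown pair, reusing the made-up example_rx_queue/example_poll types from the sketch above and the stock NAPI_POLL_WEIGHT of 64 rather than the scaled weight chosen in this patch:

/* Hypothetical per-queue NAPI registration and teardown. */
static void example_napi_setup(struct net_device *ndev,
			       struct example_rx_queue *queues, u32 count)
{
	u32 i;

	for (i = 0; i < count; i++)
		netif_napi_add(ndev, &queues[i].napi, example_poll,
			       NAPI_POLL_WEIGHT);
}

static void example_napi_teardown(struct example_rx_queue *queues, u32 count)
{
	u32 i;

	for (i = 0; i < count; i++)
		netif_napi_del(&queues[i].napi);
}
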
@@ -4129,7 +4205,11 @@ int stmmac_dvr_probe(struct device *device,
 	    priv->hw->pcs != STMMAC_PCS_RTBI)
 		stmmac_mdio_unregister(ndev);
 error_mdio_register:
-	netif_napi_del(&priv->napi);
+	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		netif_napi_del(&rx_q->napi);
+	}
 error_hw_init:
 	free_netdev(ndev);
 
@@ -4191,9 +4271,9 @@ int stmmac_suspend(struct device *dev)
 	spin_lock_irqsave(&priv->lock, flags);
 
 	netif_device_detach(ndev);
-	netif_stop_queue(ndev);
+	stmmac_stop_all_queues(priv);
 
-	napi_disable(&priv->napi);
+	stmmac_disable_all_queues(priv);
 
 	/* Stop TX/RX DMA */
 	stmmac_stop_all_dma(priv);
@@ -4296,9 +4376,9 @@ int stmmac_resume(struct device *dev)
 	stmmac_init_tx_coalesce(priv);
 	stmmac_set_rx_mode(ndev);
 
-	napi_enable(&priv->napi);
+	stmmac_enable_all_queues(priv);
 
-	netif_start_queue(ndev);
+	stmmac_start_all_queues(priv);
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 