@@ -128,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 		completed_index, enic_wq_free_buf,
 		opaque);
 
-	if (netif_queue_stopped(enic->netdev) &&
+	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
 	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
 	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
-		netif_wake_queue(enic->netdev);
+		netif_wake_subqueue(enic->netdev, q_number);
 
 	spin_unlock(&enic->wq_lock[q_number]);
 
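A note on the hunk above: enic_wq_service() runs on TX completion for one specific work queue (q_number). The old netif_queue_stopped()/netif_wake_queue() pair acts on the device's single implicit queue; with several WQs, the handler must test and wake only the subqueue backed by the ring that just freed descriptors, and only once that ring can hold a worst-case packet again. A minimal sketch of that completion-side pattern, with the hypothetical my_priv type and my_ring_space() helper standing in for the enic structures:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {			/* hypothetical driver state */
	struct net_device *netdev;
};

/* Assumed helper: number of free descriptors left in TX ring 'ring'. */
unsigned int my_ring_space(struct my_priv *priv, u16 ring);

/* Wake exactly one subqueue once its ring has freed descriptors, and
 * only if a maximally fragmented skb would now fit. */
static void my_tx_completion(struct my_priv *priv, u16 ring)
{
	struct netdev_queue *txq = netdev_get_tx_queue(priv->netdev, ring);

	if (netif_tx_queue_stopped(txq) &&
	    my_ring_space(priv, ring) >= MAX_SKB_FRAGS + 1)
		netif_wake_subqueue(priv->netdev, ring);
}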
@@ -292,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
 static irqreturn_t enic_isr_msix_wq(int irq, void *data)
 {
 	struct enic *enic = data;
-	unsigned int cq = enic_cq_wq(enic, 0);
-	unsigned int intr = enic_msix_wq_intr(enic, 0);
+	unsigned int cq;
+	unsigned int intr;
 	unsigned int wq_work_to_do = -1; /* no limit */
 	unsigned int wq_work_done;
+	unsigned int wq_irq;
+
+	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
+	cq = enic_cq_wq(enic, wq_irq);
+	intr = enic_msix_wq_intr(enic, wq_irq);
 
 	wq_work_done = vnic_cq_service(&enic->cq[cq],
 		wq_work_to_do, enic_wq_service, NULL);
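The hunk above derives the WQ index from the IRQ number itself: the driver requests one MSI-X vector per WQ, and those vectors occupy a contiguous block in enic->msix_entry[], so subtracting WQ 0's vector from the current irq yields the queue that fired. A sketch of the same arithmetic under that contiguity assumption, simplified so the TX block starts at msix_entry[0] (MY_NUM_TXQ and my_priv are made-up names, not enic code):

#include <linux/interrupt.h>
#include <linux/pci.h>

#define MY_NUM_TXQ 4			/* assumed per-device TX queue count */

struct my_priv {			/* hypothetical driver state */
	struct msix_entry msix_entry[MY_NUM_TXQ]; /* from pci_enable_msix_range() */
};

/* One handler registered for every TX vector; data is the my_priv.
 * Because the TX vectors occupy a contiguous block, irq minus the
 * vector of queue 0 recovers the index of the queue that fired. */
static irqreturn_t my_isr_msix_txq(int irq, void *data)
{
	struct my_priv *priv = data;
	unsigned int q = (u32)irq - priv->msix_entry[0].vector;

	if (q >= MY_NUM_TXQ)
		return IRQ_NONE;
	/* ... service completion queue q, then unmask vector q ... */
	return IRQ_HANDLED;
}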
@@ -511,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	struct net_device *netdev)
 {
 	struct enic *enic = netdev_priv(netdev);
-	struct vnic_wq *wq = &enic->wq[0];
+	struct vnic_wq *wq;
 	unsigned long flags;
+	unsigned int txq_map;
 
 	if (skb->len <= 0) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
+	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+	wq = &enic->wq[txq_map];
+
 	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
 	 * which is very likely.  In the off chance it's going to take
 	 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb.
@@ -531,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	spin_lock_irqsave(&enic->wq_lock[0], flags);
+	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
 
 	if (vnic_wq_desc_avail(wq) <
 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
 		/* This is a hard error, log it */
 		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
-		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 		return NETDEV_TX_BUSY;
 	}
 
 	enic_queue_wq_skb(enic, wq, skb);
 
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
 
-	spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 
 	return NETDEV_TX_OK;
 }
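With the two hunks above, the xmit path becomes fully per-queue: the stack selects a TX queue before ndo_start_xmit runs (via XPS or the flow hash) and stores it in the skb, so skb_get_queue_mapping() modulo wq_count picks the hardware ring, and every stop/lock call then indexes that ring instead of ring 0. A condensed sketch of the pattern, with hypothetical my_* helpers in place of the vnic/enic ones (the real driver also takes a per-ring spinlock, omitted here):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {			/* hypothetical driver state */
	unsigned int num_rings;		/* like enic->wq_count */
};

/* Assumed ring helpers, standing in for the vnic_wq/enic functions. */
unsigned int my_ring_space(struct my_priv *priv, unsigned int ring);
void my_ring_post(struct my_priv *priv, unsigned int ring, struct sk_buff *skb);

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);
	/* The stack already chose a queue; clamp it to the rings we created. */
	unsigned int txq_map = skb_get_queue_mapping(skb) % priv->num_rings;
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, txq_map);

	if (my_ring_space(priv, txq_map) < skb_shinfo(skb)->nr_frags + 1) {
		netif_tx_stop_queue(txq);	/* stop this ring only */
		return NETDEV_TX_BUSY;
	}

	my_ring_post(priv, txq_map, skb);

	/* Stop early if another worst-case packet would no longer fit. */
	if (my_ring_space(priv, txq_map) < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(txq);

	return NETDEV_TX_OK;
}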
@@ -1369,7 +1378,7 @@ static int enic_open(struct net_device *netdev)
 
 	enic_set_rx_mode(netdev);
 
-	netif_wake_queue(netdev);
+	netif_tx_wake_all_queues(netdev);
 
 	for (i = 0; i < enic->rq_count; i++)
 		napi_enable(&enic->napi[i]);
@@ -2032,7 +2041,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * instance data is initialized to zero.
 	 */
 
-	netdev = alloc_etherdev(sizeof(struct enic));
+	netdev = alloc_etherdev_mqs(sizeof(struct enic),
+				    ENIC_RQ_MAX, ENIC_WQ_MAX);
 	if (!netdev)
 		return -ENOMEM;
 
@@ -2198,6 +2208,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_dev_close;
 	}
 
+	netif_set_real_num_tx_queues(netdev, enic->wq_count);
+
 	/* Setup notification timer, HW reset task, and wq locks
 	 */
 
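The last two hunks work as a pair: alloc_etherdev_mqs() sizes the net_device for the most TX and RX queues the hardware could ever expose, and once probing has established how many WQs this particular vNIC actually provides, netif_set_real_num_tx_queues() limits the stack to that count. A hedged probe-time sketch of the allocate-max-then-trim pattern, with assumed MY_TXQ_MAX/MY_RXQ_MAX maxima and a made-up my_priv type:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define MY_TXQ_MAX 8	/* assumed compile-time maxima, like ENIC_WQ_MAX */
#define MY_RXQ_MAX 8	/* and ENIC_RQ_MAX */

struct my_priv {
	unsigned int num_rings;
};

/* Allocate for the worst case, then tell the stack how many TX queues
 * this particular device really has (called before register_netdev()). */
static struct net_device *my_alloc_netdev(unsigned int real_txqs)
{
	struct net_device *netdev;

	netdev = alloc_etherdev_mqs(sizeof(struct my_priv),
				    MY_TXQ_MAX, MY_RXQ_MAX);
	if (!netdev)
		return NULL;

	/* Flows are now hashed/XPS-mapped onto real_txqs queues only. */
	netif_set_real_num_tx_queues(netdev, real_txqs);
	return netdev;
}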