@@ -40,6 +40,8 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+#define VNET_MAX_TXQS		16
+
 /* Heuristic for the number of times to exponentially backoff and
  * retry sending an LDC trigger when EAGAIN is encountered
  */
@@ -551,6 +553,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
 	struct vnet *vp;
 	u32 end;
 	struct vio_net_desc *desc;
+	struct netdev_queue *txq;
+
 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
 		return 0;
 
@@ -580,7 +584,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
 	}
 	netif_tx_unlock(dev);
 
-	if (unlikely(netif_queue_stopped(dev) &&
+	txq = netdev_get_tx_queue(dev, port->q_index);
+	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
 		return 1;
 
@@ -608,31 +613,23 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
 	return 0;
 }
 
-static void maybe_tx_wakeup(struct vnet *vp)
+/* Got back a STOPPED LDC message on port. If the queue is stopped,
+ * wake it up so that we'll send out another START message at the
+ * next TX.
+ */
+static void maybe_tx_wakeup(struct vnet_port *port)
 {
-	struct net_device *dev = vp->dev;
+	struct netdev_queue *txq;
 
-	netif_tx_lock(dev);
-	if (likely(netif_queue_stopped(dev))) {
-		struct vnet_port *port;
-		int wake = 1;
-
-		rcu_read_lock();
-		list_for_each_entry_rcu(port, &vp->port_list, list) {
-			struct vio_dring_state *dr;
-
-			dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-			if (vnet_tx_dring_avail(dr) <
-			    VNET_TX_WAKEUP_THRESH(dr)) {
-				wake = 0;
-				break;
-			}
-		}
-		rcu_read_unlock();
-		if (wake)
-			netif_wake_queue(dev);
+	txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
+	__netif_tx_lock(txq, smp_processor_id());
+	if (likely(netif_tx_queue_stopped(txq))) {
+		struct vio_dring_state *dr;
+
+		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+		netif_tx_wake_queue(txq);
 	}
-	netif_tx_unlock(dev);
+	__netif_tx_unlock(txq);
 }
 
 static inline bool port_is_up(struct vnet_port *vnet)
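
The rewritten maybe_tx_wakeup() swaps the device-wide netif_tx_lock()/netif_wake_queue() pair for the per-queue primitives, so waking one port's queue no longer depends on every other port's ring having room. As a minimal sketch of that per-queue pattern in isolation (the helper name is hypothetical; how q_index is chosen stays driver-specific):

/* Sketch: wake one stopped TX queue under its own lock. */
static void example_wake_one_txq(struct net_device *dev, unsigned int qi)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qi);

	__netif_tx_lock(txq, smp_processor_id());
	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);
	__netif_tx_unlock(txq);
}
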
@@ -748,7 +745,7 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
 			break;
 	}
 	if (unlikely(tx_wakeup && err != -ECONNRESET))
-		maybe_tx_wakeup(port->vp);
+		maybe_tx_wakeup(port);
 	return npkts;
 }
 
@@ -760,6 +757,7 @@ static int vnet_poll(struct napi_struct *napi, int budget)
 
 	if (processed < budget) {
 		napi_complete(napi);
+		port->rx_event &= ~LDC_EVENT_DATA_READY;
 		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
 	}
 	return processed;
@@ -952,6 +950,16 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
 	return skb;
 }
 
+static u16
+vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+		  void *accel_priv, select_queue_fallback_t fallback)
+{
+	struct vnet *vp = netdev_priv(dev);
+	struct vnet_port *port = __tx_port_find(vp, skb);
+
+	return port->q_index;
+}
+
 static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct vnet *vp = netdev_priv(dev);
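
vnet_select_queue() pins each skb to the TX queue of the port that will carry it (__tx_port_find() is the driver's existing destination-MAC port lookup). The core records the returned index in the skb before calling ndo_start_xmit, which is why the xmit path below can recover it with skb_get_queue_mapping(). A hedged sketch of that contract, illustrative only and not this driver's code:

/* Sketch: the queue index chosen at select-queue time travels with the skb. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 qi = skb_get_queue_mapping(skb);	/* filled in from ndo_select_queue */
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qi);

	if (netif_tx_queue_stopped(txq))
		return NETDEV_TX_BUSY;		/* per-queue backpressure */
	/* ...post skb to the descriptor ring bound to queue qi... */
	return NETDEV_TX_OK;
}
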
@@ -964,6 +972,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	void *start = NULL;
 	int nlen = 0;
 	unsigned pending = 0;
+	struct netdev_queue *txq;
 
 	skb = vnet_skb_shape(skb, &start, &nlen);
 	if (unlikely(!skb))
@@ -1007,9 +1016,11 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+	i = skb_get_queue_mapping(skb);
+	txq = netdev_get_tx_queue(dev, i);
 	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
 
 			/* This is a hard error, log it. */
 			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
@@ -1103,9 +1114,9 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
 	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
-			netif_wake_queue(dev);
+			netif_tx_wake_queue(txq);
 	}
 
 	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
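
Note the ordering in the hunk above: the queue is stopped first and availability re-checked afterwards, so a completion that frees descriptors between the two steps cannot leave the queue stopped forever. The same stop/re-check/wake pattern reduced to its skeleton, where struct ring, ring_avail() and WAKE_THRESH are placeholders for the driver's real types and helpers:

/* Skeleton of the stop/re-check/wake pattern (placeholder helpers). */
static void example_tx_backpressure(struct netdev_queue *txq, struct ring *dr)
{
	netif_tx_stop_queue(txq);		/* stop before the final re-check */
	if (ring_avail(dr) > WAKE_THRESH)	/* did completions free slots meanwhile? */
		netif_tx_wake_queue(txq);	/* then un-stop ourselves */
}
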
@@ -1138,14 +1149,14 @@ static void vnet_tx_timeout(struct net_device *dev)
 static int vnet_open(struct net_device *dev)
 {
 	netif_carrier_on(dev);
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	return 0;
 }
 
 static int vnet_close(struct net_device *dev)
 {
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
 
 	return 0;
@@ -1419,6 +1430,7 @@ static const struct net_device_ops vnet_ops = {
 	.ndo_tx_timeout		= vnet_tx_timeout,
 	.ndo_change_mtu		= vnet_change_mtu,
 	.ndo_start_xmit		= vnet_start_xmit,
+	.ndo_select_queue	= vnet_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= vnet_poll_controller,
 #endif
@@ -1430,7 +1442,7 @@ static struct vnet *vnet_new(const u64 *local_mac)
 	struct vnet *vp;
 	int err, i;
 
-	dev = alloc_etherdev(sizeof(*vp));
+	dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
 	if (!dev)
 		return ERR_PTR(-ENOMEM);
 	dev->needed_headroom = VNET_PACKET_SKIP + 8;
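
alloc_etherdev_mqs(sizeof_priv, txqs, rxqs) is the multiqueue variant of alloc_etherdev(); the single-queue form is equivalent to asking for one queue of each kind, so this hunk is a strict generalization:

/* Equivalence (per etherdevice.h): */
dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);	/* 16 TX queues, 1 RX queue */
/* alloc_etherdev(sizeof(*vp)) expands to alloc_etherdev_mqs(sizeof(*vp), 1, 1) */
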
@@ -1555,6 +1567,25 @@ static void print_version(void)
 
 const char *remote_macaddr_prop = "remote-mac-address";
 
+static void
+vnet_port_add_txq(struct vnet_port *port)
+{
+	struct vnet *vp = port->vp;
+	int n;
+
+	n = vp->nports++;
+	n = n & (VNET_MAX_TXQS - 1);
+	port->q_index = n;
+	netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
+}
+
+static void
+vnet_port_rm_txq(struct vnet_port *port)
+{
+	port->vp->nports--;
+	netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
+}
+
 static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
 	struct mdesc_handle *hp;
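
Ports are assigned to TX queues round-robin; because the index is reduced with a mask rather than a modulo, VNET_MAX_TXQS must stay a power of two. A small illustration (the BUILD_BUG_ON guard is a suggestion, not part of the patch):

/* n & (VNET_MAX_TXQS - 1), with VNET_MAX_TXQS == 16:
 *   port 0  -> queue 0, ... port 15 -> queue 15,
 *   port 16 -> queue 0 again; queues are shared once nports > 16.
 */
BUILD_BUG_ON(VNET_MAX_TXQS & (VNET_MAX_TXQS - 1));	/* hypothetical power-of-2 guard */
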
@@ -1623,6 +1654,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	list_add_tail_rcu(&port->list, &vp->port_list);
 	hlist_add_head_rcu(&port->hash,
 			   &vp->port_hash[vnet_hashfn(port->raddr)]);
+	vnet_port_add_txq(port);
 	spin_unlock_irqrestore(&vp->lock, flags);
 
 	dev_set_drvdata(&vdev->dev, port);
@@ -1667,6 +1699,7 @@ static int vnet_port_remove(struct vio_dev *vdev)
 
 	synchronize_rcu();
 	del_timer_sync(&port->clean_timer);
+	vnet_port_rm_txq(port);
 	netif_napi_del(&port->napi);
 	vnet_port_free_tx_bufs(port);
 	vio_ldc_free(&port->vio);