@@ -37,6 +37,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
  */
 #define VNET_MAX_RETRIES	10
 
+static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
+
 /* Ordered from largest major to lowest */
 static struct vio_version vnet_versions[] = {
 	{ .major = 1, .minor = 0 },
@@ -283,10 +285,18 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
 				port->raddr[0], port->raddr[1],
 				port->raddr[2], port->raddr[3],
 				port->raddr[4], port->raddr[5]);
-			err = -ECONNRESET;
+			break;
 		}
 	} while (err == -EAGAIN);
 
+	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
+		port->stop_rx_idx = end;
+		port->stop_rx = true;
+	} else {
+		port->stop_rx_idx = 0;
+		port->stop_rx = false;
+	}
+
 	return err;
 }
 
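
The tail added to vnet_send_ack() latches a DRING_STOPPED ack that could not be delivered, so a later transmit trigger can replay it. Below is a minimal standalone sketch of that latch pattern; the port_state struct, the send_msg() stub and the values in main() are invented stand-ins for the driver's vnet_port and its vio_ldc_send() retry loop, not the driver's actual API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the relevant vnet_port fields. */
struct port_state {
	bool     stop_rx;     /* a STOPPED ack is still owed to the peer */
	uint32_t stop_rx_idx; /* ring index that pending ack covers      */
};

/* Stub transport: > 0 on success, <= 0 on failure (here it always
 * fails, as if -EAGAIN exhausted its retries).
 */
static int send_msg(uint32_t end_idx, bool stopped)
{
	(void)end_idx;
	(void)stopped;
	return -1;
}

/* Mirrors the patched tail of vnet_send_ack(): on a failed
 * DRING_STOPPED ack, remember which index it covered; otherwise
 * clear the latch.
 */
static int send_ack(struct port_state *p, uint32_t end, bool stopped)
{
	int err = send_msg(end, stopped);

	if (err <= 0 && stopped) {
		p->stop_rx_idx = end;
		p->stop_rx = true;
	} else {
		p->stop_rx_idx = 0;
		p->stop_rx = false;
	}
	return err;
}

int main(void)
{
	struct port_state p = { false, 0 };

	send_ack(&p, 42, true);
	printf("stop_rx=%d stop_rx_idx=%u\n", p.stop_rx, (unsigned)p.stop_rx_idx);
	return 0;
}
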
@@ -448,15 +458,32 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
 	struct net_device *dev;
 	struct vnet *vp;
 	u32 end;
-
+	struct vio_net_desc *desc;
 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
 		return 0;
 
 	end = pkt->end_idx;
 	if (unlikely(!idx_is_pending(dr, end)))
 		return 0;
 
+	/* sync for race conditions with vnet_start_xmit() and tell xmit it
+	 * is time to send a trigger.
+	 */
 	dr->cons = next_idx(end, dr);
+	desc = vio_dring_entry(dr, dr->cons);
+	if (desc->hdr.state == VIO_DESC_READY && port->start_cons) {
+		/* vnet_start_xmit() just populated this dring but missed
+		 * sending the "start" LDC message to the consumer.
+		 * Send a "start" trigger on its behalf.
+		 */
+		if (__vnet_tx_trigger(port, dr->cons) > 0)
+			port->start_cons = false;
+		else
+			port->start_cons = true;
+	} else {
+		port->start_cons = true;
+	}
+
 
 	vp = port->vp;
 	dev = vp->dev;
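
The block added to vnet_ack() advances dr->cons past the acked index and then decides, based on the descriptor state at the new cons and the start_cons flag, whether to fire an LDC "start" trigger itself or to arm start_cons so the next transmit sends one. A rough single-threaded model of that decision, mirroring the branch as written above; the tx_ring struct, RING_SIZE and the tx_trigger() stub are invented for the sketch and stand in for vnet_port, the vio dring and __vnet_tx_trigger().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8  /* invented; the driver uses VNET_TX_RING_SIZE */

enum desc_state { DESC_FREE, DESC_READY };

struct tx_ring {
	enum desc_state desc[RING_SIZE];
	uint32_t cons;       /* next index not yet acked by the peer */
	bool     start_cons; /* should the next xmit send a "start"? */
};

/* Stub for __vnet_tx_trigger(); > 0 means the trigger went out. */
static int tx_trigger(uint32_t start)
{
	printf("start trigger at index %u\n", (unsigned)start);
	return 1;
}

/* Models the vnet_ack() addition: move cons past the acked index, then
 * either send a "start" trigger for the READY descriptor sitting at the
 * new cons or re-arm start_cons so the transmit path sends the next one.
 */
static void handle_stopped_ack(struct tx_ring *r, uint32_t acked_end)
{
	r->cons = (acked_end + 1) % RING_SIZE;

	if (r->desc[r->cons] == DESC_READY && r->start_cons)
		r->start_cons = (tx_trigger(r->cons) <= 0);
	else
		r->start_cons = true;
}

int main(void)
{
	struct tx_ring r = { .cons = 0, .start_cons = true };

	r.desc[3] = DESC_READY;    /* producer has queued index 3         */
	handle_stopped_ack(&r, 2); /* peer acked everything up to index 2 */
	printf("cons=%u start_cons=%d\n", (unsigned)r.cons, r.start_cons);
	return 0;
}

The longer comment added in the vnet_start_xmit() hunk below spells out how this ack-side check and the transmit-side gating are meant to divide the work of sending "start" triggers.
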
@@ -597,7 +624,7 @@ static void vnet_event(void *arg, int event)
 	local_irq_restore(flags);
 }
 
-static int __vnet_tx_trigger(struct vnet_port *port)
+static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
 {
 	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
 	struct vio_dring_data hdr = {
@@ -608,12 +635,21 @@ static int __vnet_tx_trigger(struct vnet_port *port)
 			.sid = vio_send_sid(&port->vio),
 		},
 		.dring_ident = dr->ident,
-		.start_idx = dr->prod,
+		.start_idx = start,
 		.end_idx = (u32) -1,
 	};
 	int err, delay;
 	int retries = 0;
 
+	if (port->stop_rx) {
+		err = vnet_send_ack(port,
+				    &port->vio.drings[VIO_DRIVER_RX_RING],
+				    port->stop_rx_idx, -1,
+				    VIO_DRING_STOPPED);
+		if (err <= 0)
+			return err;
+	}
+
 	hdr.seq = dr->snd_nxt;
 	delay = 1;
 	do {
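
Before raising a new "start" trigger, the patched __vnet_tx_trigger() first replays any DRING_STOPPED ack that vnet_send_ack() failed to deliver, using the stop_rx latch set up earlier. A compact sketch of that "flush the owed ack, then trigger" ordering; port_state, send_stopped_ack() and send_start_trigger() are invented stand-ins for the driver's vnet_port, vnet_send_ack() and the LDC trigger send.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct port_state {
	bool     stop_rx;     /* a STOPPED ack is still owed to the peer */
	uint32_t stop_rx_idx; /* index that pending ack covers           */
};

/* Stub for vnet_send_ack(); on success it clears the latch, the way the
 * real function does in the earlier hunk.
 */
static int send_stopped_ack(struct port_state *p)
{
	printf("replaying STOPPED ack up to index %u\n",
	       (unsigned)p->stop_rx_idx);
	p->stop_rx = false;
	p->stop_rx_idx = 0;
	return 1;
}

/* Stub for the LDC "start" trigger send. */
static int send_start_trigger(uint32_t start)
{
	printf("start trigger at index %u\n", (unsigned)start);
	return 1;
}

/* Mirrors the ordering added to __vnet_tx_trigger(): anything the rx
 * side still owes the peer goes out first; only then is the new "start"
 * trigger raised, and a failed replay aborts the trigger entirely.
 */
static int tx_trigger(struct port_state *p, uint32_t start)
{
	if (p->stop_rx) {
		int err = send_stopped_ack(p);

		if (err <= 0)
			return err;
	}
	return send_start_trigger(start);
}

int main(void)
{
	struct port_state p = { .stop_rx = true, .stop_rx_idx = 7 };

	return tx_trigger(&p, 3) > 0 ? 0 : 1;
}
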
@@ -734,14 +770,40 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	d->hdr.state = VIO_DESC_READY;
 
-	err = __vnet_tx_trigger(port);
+	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
+	 * to notify the consumer that some descriptors are READY.
+	 * After that "start" trigger, no additional triggers are needed until
+	 * a DRING_STOPPED is received from the consumer. The dr->cons field
+	 * (set up by vnet_ack()) has the value of the next dring index
+	 * that has not yet been ack-ed. We send a "start" trigger here
+	 * if, and only if, start_cons is true (reset it afterward). Conversely,
+	 * vnet_ack() should check if the dring corresponding to cons
+	 * is marked READY, but start_cons was false.
+	 * If so, vnet_ack() should send out the missed "start" trigger.
+	 *
+	 * Note that the wmb() above makes sure the cookies et al. are
+	 * not globally visible before the VIO_DESC_READY, and that the
+	 * stores are ordered correctly by the compiler. The consumer will
+	 * not proceed until the VIO_DESC_READY is visible assuring that
+	 * the consumer does not observe anything related to descriptors
+	 * out of order. The HV trap from the LDC start trigger is the
+	 * producer to consumer announcement that work is available to the
+	 * consumer
+	 */
+	if (!port->start_cons)
+		goto ldc_start_done; /* previous trigger suffices */
+
+	err = __vnet_tx_trigger(port, dr->cons);
 	if (unlikely(err < 0)) {
 		netdev_info(dev, "TX trigger error %d\n", err);
 		d->hdr.state = VIO_DESC_FREE;
 		dev->stats.tx_carrier_errors++;
 		goto out_dropped_unlock;
 	}
 
+ldc_start_done:
+	port->start_cons = false;
+
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
 
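
The long comment above describes the producer-side contract: mark a descriptor READY (ordered by the earlier wmb()), then send at most one LDC "start" trigger per stretch of untriggered work, gated by start_cons. A simplified single-threaded model of that gating; the tx_ring struct, RING_SIZE and the start_trigger() stub are invented for the sketch, and the real driver's locking, memory barriers and error unwinding are left out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8  /* invented; the driver uses VNET_TX_RING_SIZE */

enum desc_state { DESC_FREE, DESC_READY };

struct tx_ring {
	enum desc_state desc[RING_SIZE];
	uint32_t prod;       /* next slot the producer fills           */
	uint32_t cons;       /* first slot not yet acked by the peer   */
	bool     start_cons; /* true: this xmit owes a "start" trigger */
};

/* Stub for the LDC trigger; > 0 means the hypervisor accepted it. */
static int start_trigger(uint32_t start)
{
	printf("start trigger at index %u\n", (unsigned)start);
	return 1;
}

/* Models the patched tail of vnet_start_xmit(): mark the slot READY and
 * send a "start" trigger only when start_cons says one is owed; after a
 * trigger (or a deliberate skip) start_cons is cleared, so a burst of
 * packets produces a single trigger until the peer acks and the ack
 * path re-arms the flag.
 */
static void xmit_one(struct tx_ring *r)
{
	r->desc[r->prod] = DESC_READY;
	r->prod = (r->prod + 1) % RING_SIZE;

	if (r->start_cons)
		start_trigger(r->cons); /* error path omitted in this sketch */
	r->start_cons = false;
}

int main(void)
{
	struct tx_ring r = { .prod = 0, .cons = 0, .start_cons = true };

	xmit_one(&r); /* first packet of the burst: one trigger goes out */
	xmit_one(&r); /* coalesced: covered by the trigger already sent  */
	xmit_one(&r);
	return 0;
}
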
@@ -1035,6 +1097,7 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
 			  (sizeof(struct ldc_trans_cookie) * 2));
 	dr->num_entries = VNET_TX_RING_SIZE;
 	dr->prod = dr->cons = 0;
+	port->start_cons = true; /* need an initial trigger */
 	dr->pending = VNET_TX_RING_SIZE;
 	dr->ncookies = ncookies;
 