@@ -607,19 +607,21 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
 /**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
  * @vq: the struct virtqueue we're talking about.
  *
- * This re-enables callbacks; it returns "false" if there are pending
- * buffers in the queue, to detect a possible race between the driver
- * checking for more work, and enabling callbacks.
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
  *
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-bool virtqueue_enable_cb(struct virtqueue *_vq)
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 last_used_idx;
 
 	START_USE(vq);
 
@@ -629,15 +631,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	vring_used_event(&vq->vring) = vq->last_used_idx;
+	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
+	END_USE(vq);
+	return last_used_idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
 	virtio_mb(vq->weak_barriers);
-	if (unlikely(more_used(vq))) {
-		END_USE(vq);
-		return false;
-	}
+	return (u16)last_used_idx != vq->vring.used->idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_poll);
 
-	END_USE(vq);
-	return true;
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+bool virtqueue_enable_cb(struct virtqueue *_vq)
+{
+	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+	return !virtqueue_poll(_vq, last_used_idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
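The point of the split is visible in the virtqueue_poll kerneldoc above: the poll step needs no serialization, so a driver can re-arm callbacks under its lock and then re-check for a racing completion without it. A minimal usage sketch of the new pair, assuming the caller already serializes virtqueue_disable_cb/virtqueue_get_buf as those APIs require; my_drain_queue and my_process_buf are hypothetical names, not part of this patch:

/* my_process_buf() stands in for driver-specific completion handling. */
static void my_process_buf(void *buf, unsigned int len);

static void my_drain_queue(struct virtqueue *vq)
{
	unsigned last_used_idx;
	unsigned int len;
	void *buf;

	for (;;) {
		/* Suppress callbacks while we batch-process used buffers. */
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			my_process_buf(buf, len);

		/* Re-arm callbacks and snapshot the queue state... */
		last_used_idx = virtqueue_enable_cb_prepare(vq);

		/* ...then re-check: a buffer that became used between the
		 * last get_buf() and enable_cb_prepare() would raise no
		 * callback, so loop around and consume it instead. */
		if (!virtqueue_poll(vq, last_used_idx))
			break;
	}
}

For callers that do everything under one lock, virtqueue_enable_cb() still provides the old one-shot behavior; as the patch shows, it is now just enable_cb_prepare() followed by a negated virtqueue_poll(). The two-step form matters when the poll has to happen outside the serialized section, e.g. from a busy-poll loop.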