@@ -396,13 +396,10 @@ static inline unsigned long busy_clock(void)
 	return local_clock() >> 10;
 }
 
-static bool vhost_can_busy_poll(struct vhost_dev *dev,
-				unsigned long endtime)
+static bool vhost_can_busy_poll(unsigned long endtime)
 {
-	return likely(!need_resched()) &&
-	       likely(!time_after(busy_clock(), endtime)) &&
-	       likely(!signal_pending(current)) &&
-	       !vhost_has_work(dev);
+	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
+		      !signal_pending(current));
 }
 
 static void vhost_net_disable_vq(struct vhost_net *n,
@@ -434,7 +431,8 @@ static int vhost_net_enable_vq(struct vhost_net *n,
 static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
 				    struct vhost_virtqueue *vq,
 				    struct iovec iov[], unsigned int iov_size,
-				    unsigned int *out_num, unsigned int *in_num)
+				    unsigned int *out_num, unsigned int *in_num,
+				    bool *busyloop_intr)
 {
 	unsigned long uninitialized_var(endtime);
 	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
@@ -443,9 +441,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
 	if (r == vq->num && vq->busyloop_timeout) {
 		preempt_disable();
 		endtime = busy_clock() + vq->busyloop_timeout;
-		while (vhost_can_busy_poll(vq->dev, endtime) &&
-		       vhost_vq_avail_empty(vq->dev, vq))
+		while (vhost_can_busy_poll(endtime)) {
+			if (vhost_has_work(vq->dev)) {
+				*busyloop_intr = true;
+				break;
+			}
+			if (!vhost_vq_avail_empty(vq->dev, vq))
+				break;
 			cpu_relax();
+		}
 		preempt_enable();
 		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 				      out_num, in_num, NULL, NULL);
@@ -501,20 +505,24 @@ static void handle_tx(struct vhost_net *net)
 	zcopy = nvq->ubufs;
 
 	for (;;) {
+		bool busyloop_intr;
+
 		/* Release DMAs done buffers first */
 		if (zcopy)
 			vhost_zerocopy_signal_used(net, vq);
 
-
+		busyloop_intr = false;
 		head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
 						ARRAY_SIZE(vq->iov),
-						&out, &in);
+						&out, &in, &busyloop_intr);
 		/* On error, stop handling until the next kick. */
 		if (unlikely(head < 0))
 			break;
 		/* Nothing new?  Wait for eventfd to tell us they refilled. */
 		if (head == vq->num) {
-			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+			if (unlikely(busyloop_intr)) {
+				vhost_poll_queue(&vq->poll);
+			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
 				vhost_disable_notify(&net->dev, vq);
 				continue;
 			}
@@ -645,41 +653,50 @@ static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
 	nvq->done_idx = 0;
 }
 
-static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
+				      bool *busyloop_intr)
 {
-	struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
-	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
-	struct vhost_virtqueue *vq = &nvq->vq;
+	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *rvq = &rnvq->vq;
+	struct vhost_virtqueue *tvq = &tnvq->vq;
 	unsigned long uninitialized_var(endtime);
-	int len = peek_head_len(rvq, sk);
+	int len = peek_head_len(rnvq, sk);
 
-	if (!len && vq->busyloop_timeout) {
+	if (!len && tvq->busyloop_timeout) {
 		/* Flush batched heads first */
-		vhost_rx_signal_used(rvq);
+		vhost_rx_signal_used(rnvq);
 		/* Both tx vq and rx socket were polled here */
-		mutex_lock_nested(&vq->mutex, 1);
-		vhost_disable_notify(&net->dev, vq);
+		mutex_lock_nested(&tvq->mutex, 1);
+		vhost_disable_notify(&net->dev, tvq);
 
 		preempt_disable();
-		endtime = busy_clock() + vq->busyloop_timeout;
+		endtime = busy_clock() + tvq->busyloop_timeout;
 
-		while (vhost_can_busy_poll(&net->dev, endtime) &&
-		       !sk_has_rx_data(sk) &&
-		       vhost_vq_avail_empty(&net->dev, vq))
+		while (vhost_can_busy_poll(endtime)) {
+			if (vhost_has_work(&net->dev)) {
+				*busyloop_intr = true;
+				break;
+			}
+			if ((sk_has_rx_data(sk) &&
+			     !vhost_vq_avail_empty(&net->dev, rvq)) ||
+			    !vhost_vq_avail_empty(&net->dev, tvq))
+				break;
 			cpu_relax();
+		}
 
 		preempt_enable();
 
-		if (!vhost_vq_avail_empty(&net->dev, vq))
-			vhost_poll_queue(&vq->poll);
-		else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
-			vhost_disable_notify(&net->dev, vq);
-			vhost_poll_queue(&vq->poll);
+		if (!vhost_vq_avail_empty(&net->dev, tvq)) {
+			vhost_poll_queue(&tvq->poll);
+		} else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
+			vhost_disable_notify(&net->dev, tvq);
+			vhost_poll_queue(&tvq->poll);
 		}
 
-		mutex_unlock(&vq->mutex);
+		mutex_unlock(&tvq->mutex);
 
-		len = peek_head_len(rvq, sk);
+		len = peek_head_len(rnvq, sk);
 	}
 
 	return len;
@@ -786,6 +803,7 @@ static void handle_rx(struct vhost_net *net)
 	s16 headcount;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
+	bool busyloop_intr = false;
 	struct socket *sock;
 	struct iov_iter fixup;
 	__virtio16 num_buffers;
@@ -809,7 +827,8 @@ static void handle_rx(struct vhost_net *net)
 		vq->log : NULL;
 	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
-	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
+	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+						      &busyloop_intr))) {
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
 		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -820,7 +839,9 @@ static void handle_rx(struct vhost_net *net)
 			goto out;
 		/* OK, now we need to know about added descriptors. */
 		if (!headcount) {
-			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+			if (unlikely(busyloop_intr)) {
+				vhost_poll_queue(&vq->poll);
+			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
 				/* They have slipped one in as we were
 				 * doing that: check again. */
 				vhost_disable_notify(&net->dev, vq);
@@ -830,6 +851,7 @@ static void handle_rx(struct vhost_net *net)
 			 * they refilled. */
 			goto out;
 		}
+		busyloop_intr = false;
 		if (nvq->rx_ring)
 			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
 		/* On overrun, truncate and discard */
@@ -896,7 +918,10 @@ static void handle_rx(struct vhost_net *net)
 			goto out;
 		}
 	}
-	vhost_net_enable_vq(net, vq);
+	if (unlikely(busyloop_intr))
+		vhost_poll_queue(&vq->poll);
+	else
+		vhost_net_enable_vq(net, vq);
 out:
 	vhost_rx_signal_used(nvq);
 	mutex_unlock(&vq->mutex);
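
For readers outside kernel context, here is a minimal userspace sketch of the busy-poll shape the diff moves to: spin until a deadline, but break out early and report the interruption when other work is pending, so the caller can requeue its handler instead of re-enabling notifications. This is only an illustration under stated assumptions; the names now_ns, other_work_pending, data_available, and poll_for_data are invented stand-ins, not vhost APIs.

/*
 * Illustrative sketch only (plain C, userspace), not vhost code.
 * other_work_pending() and data_available() stand in for
 * vhost_has_work() and the avail-ring/socket checks.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Stub predicates for the sketch; always false here. */
static bool other_work_pending(void) { return false; }
static bool data_available(void)     { return false; }

/* Returns true if data showed up; *interrupted reports an early bail-out. */
static bool poll_for_data(unsigned long long timeout_ns, bool *interrupted)
{
	unsigned long long deadline = now_ns() + timeout_ns;

	*interrupted = false;
	while (now_ns() < deadline) {
		if (other_work_pending()) {
			*interrupted = true;	/* caller should requeue itself */
			return false;
		}
		if (data_available())
			return true;
		/* a cpu_relax()-style pause would go here */
	}
	return false;
}

int main(void)
{
	bool interrupted;
	bool got = poll_for_data(100000ULL /* 100us */, &interrupted);

	printf("data: %d, interrupted: %d\n", got, interrupted);
	return 0;
}

In the patch above the same roles are played by busyloop_intr (the interruption flag) and by vhost_poll_queue() versus vhost_enable_notify()/vhost_net_enable_vq() on the caller side.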