@@ -623,26 +623,6 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 		bfqq->pos_root = NULL;
 }
 
-/*
- * Tell whether there are active queues with different weights or
- * active groups.
- */
-static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
-{
-	/*
-	 * For queue weights to differ, queue_weights_tree must contain
-	 * at least two nodes.
-	 */
-	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
-		(bfqd->queue_weights_tree.rb_node->rb_left ||
-		 bfqd->queue_weights_tree.rb_node->rb_right)
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-	       ) ||
-	       (bfqd->num_groups_with_pending_reqs > 0
-#endif
-	       );
-}
-
 /*
  * The following function returns true if every queue must receive the
  * same share of the throughput (this condition is used when deciding
@@ -651,25 +631,48 @@ static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
  *
  * Such a scenario occurs when:
  * 1) all active queues have the same weight,
- * 2) all active groups at the same level in the groups tree have the same
- *    weight,
+ * 2) all active queues belong to the same I/O-priority class,
  * 3) all active groups at the same level in the groups tree have the same
+ *    weight,
+ * 4) all active groups at the same level in the groups tree have the same
  *    number of children.
  *
  * Unfortunately, keeping the necessary state for evaluating exactly
  * the last two symmetry sub-conditions above would be quite complex
- * and time consuming.  Therefore this function evaluates, instead,
- * only the following stronger two sub-conditions, for which it is
+ * and time consuming. Therefore this function evaluates, instead,
+ * only the following stronger three sub-conditions, for which it is
  * much easier to maintain the needed state:
  * 1) all active queues have the same weight,
- * 2) there are no active groups.
+ * 2) all active queues belong to the same I/O-priority class,
+ * 3) there are no active groups.
  * In particular, the last condition is always true if hierarchical
  * support or the cgroups interface are not enabled, thus no state
  * needs to be maintained in this case.
  */
 static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
 {
-	return !bfq_varied_queue_weights_or_active_groups(bfqd);
+	/*
+	 * For queue weights to differ, queue_weights_tree must contain
+	 * at least two nodes.
+	 */
+	bool varied_queue_weights = !RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
+		(bfqd->queue_weights_tree.rb_node->rb_left ||
+		 bfqd->queue_weights_tree.rb_node->rb_right);
+
+	bool multiple_classes_busy =
+		(bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
+		(bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
+		(bfqd->busy_queues[1] && bfqd->busy_queues[2]);
+
+	/*
+	 * For queue weights to differ, queue_weights_tree must contain
+	 * at least two nodes.
+	 */
+	return !(varied_queue_weights || multiple_classes_busy
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+	       || bfqd->num_groups_with_pending_reqs > 0
+#endif
+	       );
 }
 
 /*
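
The multiple_classes_busy test above reads a three-entry busy_queues[] array, one counter per I/O-priority class (RT, BE, IDLE), which this patch introduces in place of the old scalar bfqd->busy_queues. The accounting itself is not part of this diff; the sketch below shows the presumed update, with the helper name bfq_mark_bfqq_class_busy invented purely for illustration (in the kernel the increments live in bfq_add_bfqq_busy()/bfq_del_bfqq_busy() in bfq-wf2q.c):

	/*
	 * Illustration only, not part of the patch: classes RT, BE and
	 * IDLE are numbered from 1 in the block layer, hence the "- 1"
	 * when indexing the three-entry busy_queues[] array read by
	 * bfq_symmetric_scenario().
	 */
	static void bfq_mark_bfqq_class_busy(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq, bool busy)
	{
		if (busy)
			bfqd->busy_queues[bfqq->ioprio_class - 1]++;
		else
			bfqd->busy_queues[bfqq->ioprio_class - 1]--;
	}

With counters maintained this way, the three pairwise ANDs in multiple_classes_busy are true exactly when at least two of the three counters are nonzero, i.e., when queues from more than one priority class are backlogged at once: the new asymmetry source this patch detects.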
@@ -728,15 +731,14 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	/*
 	 * In the unlucky event of an allocation failure, we just
 	 * exit. This will cause the weight of queue to not be
-	 * considered in bfq_varied_queue_weights_or_active_groups,
-	 * which, in its turn, causes the scenario to be deemed
-	 * wrongly symmetric in case bfqq's weight would have been
-	 * the only weight making the scenario asymmetric. On the
-	 * bright side, no unbalance will however occur when bfqq
-	 * becomes inactive again (the invocation of this function
-	 * is triggered by an activation of queue). In fact,
-	 * bfq_weights_tree_remove does nothing if
-	 * !bfqq->weight_counter.
+	 * considered in bfq_symmetric_scenario, which, in its turn,
+	 * causes the scenario to be deemed wrongly symmetric in case
+	 * bfqq's weight would have been the only weight making the
+	 * scenario asymmetric. On the bright side, no unbalance will
+	 * however occur when bfqq becomes inactive again (the
+	 * invocation of this function is triggered by an activation
+	 * of queue). In fact, bfq_weights_tree_remove does nothing
+	 * if !bfqq->weight_counter.
 	 */
 	if (unlikely(!bfqq->weight_counter))
 		return;
@@ -2227,7 +2229,7 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		return NULL;
 
 	/* If there is only one backlogged queue, don't search. */
-	if (bfqd->busy_queues == 1)
+	if (bfq_tot_busy_queues(bfqd) == 1)
 		return NULL;
 
 	in_service_bfqq = bfqd->in_service_queue;
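
bfq_tot_busy_queues(), used here and in the hunks below, replaces every direct read of the old scalar counter. Its definition is not shown in this diff; given the per-class array above, it is presumably a one-line inline in bfq-iosched.h that sums the three class counters — a sketch:

	/*
	 * Sketch, consistent with the busy_queues[] accesses in
	 * bfq_symmetric_scenario(); assumed to live in bfq-iosched.h
	 * next to struct bfq_data.
	 */
	static inline unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
	{
		return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
		       bfqd->busy_queues[2];
	}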
@@ -3681,7 +3683,8 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
 	 * the requests already queued in the device have been served.
 	 */
 	asymmetric_scenario = (bfqq->wr_coeff > 1 &&
-			       bfqd->wr_busy_queues < bfqd->busy_queues) ||
+			       bfqd->wr_busy_queues <
+			       bfq_tot_busy_queues(bfqd)) ||
 		!bfq_symmetric_scenario(bfqd);
 
 	/*
@@ -3960,7 +3963,7 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
 	 * belongs to CLASS_IDLE and other queues are waiting for
 	 * service.
 	 */
-	if (!(bfqd->busy_queues > 1 && bfq_class_idle(bfqq)))
+	if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
 		goto return_rq;
 
 	bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
@@ -3978,7 +3981,7 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
 	 * most a call to dispatch for nothing
 	 */
 	return !list_empty_careful(&bfqd->dispatch) ||
-		bfqd->busy_queues > 0;
+		bfq_tot_busy_queues(bfqd) > 0;
 }
 
 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
@@ -4032,9 +4035,10 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 		goto start_rq;
 	}
 
-	bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
+	bfq_log(bfqd, "dispatch requests: %d busy queues",
+		bfq_tot_busy_queues(bfqd));
 
-	if (bfqd->busy_queues == 0)
+	if (bfq_tot_busy_queues(bfqd) == 0)
 		goto exit;
 
 	/*