@@ -924,7 +924,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
 	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
-	int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+	int errors, queued;
 
 	if (list_empty(list))
 		return false;
@@ -935,6 +935,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 	errors = queued = 0;
 	do {
 		struct blk_mq_queue_data bd;
+		blk_status_t ret;
 
 		rq = list_first_entry(list, struct request, queuelist);
 		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
@@ -975,25 +976,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 		}
 
 		ret = q->mq_ops->queue_rq(hctx, &bd);
-		switch (ret) {
-		case BLK_MQ_RQ_QUEUE_OK:
-			queued++;
-			break;
-		case BLK_MQ_RQ_QUEUE_BUSY:
+		if (ret == BLK_STS_RESOURCE) {
 			blk_mq_put_driver_tag_hctx(hctx, rq);
 			list_add(&rq->queuelist, list);
 			__blk_mq_requeue_request(rq);
 			break;
-		default:
-			pr_err("blk-mq: bad return on queue: %d\n", ret);
-		case BLK_MQ_RQ_QUEUE_ERROR:
+		}
+
+		if (unlikely(ret != BLK_STS_OK)) {
 			errors++;
 			blk_mq_end_request(rq, BLK_STS_IOERR);
-			break;
+			continue;
 		}
 
-		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
-			break;
+		queued++;
 	} while (!list_empty(list));
 
 	hctx->dispatched[queued_to_index(queued)]++;
@@ -1031,7 +1027,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 	 * - blk_mq_run_hw_queue() checks whether or not a queue has
 	 *   been stopped before rerunning a queue.
 	 * - Some but not all block drivers stop a queue before
-	 *   returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
+	 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
 	 *   and dm-rq.
 	 */
 	if (!blk_mq_sched_needs_restart(hctx) &&
@@ -1410,7 +1406,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 	};
 	struct blk_mq_hw_ctx *hctx;
 	blk_qc_t new_cookie;
-	int ret;
+	blk_status_t ret;
 
 	if (q->elevator)
 		goto insert;
@@ -1426,18 +1422,19 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 	 * would have done
 	 */
 	ret = q->mq_ops->queue_rq(hctx, &bd);
-	if (ret == BLK_MQ_RQ_QUEUE_OK) {
+	switch (ret) {
+	case BLK_STS_OK:
 		*cookie = new_cookie;
 		return;
-	}
-
-	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+	case BLK_STS_RESOURCE:
+		__blk_mq_requeue_request(rq);
+		goto insert;
+	default:
 		*cookie = BLK_QC_T_NONE;
-		blk_mq_end_request(rq, BLK_STS_IOERR);
+		blk_mq_end_request(rq, ret);
 		return;
 	}
 
-	__blk_mq_requeue_request(rq);
 insert:
 	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
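For context on the calling convention this diff establishes: a driver's ->queue_rq callback now returns a blk_status_t directly instead of a BLK_MQ_RQ_QUEUE_* code, and the core reacts to BLK_STS_OK, BLK_STS_RESOURCE, and any other status as shown above. The sketch below is a minimal illustration of that contract, not code from this commit; struct my_dev, my_dev_full(), and my_dev_submit() are hypothetical stand-ins for real driver state and submission logic.

#include <linux/blk-mq.h>

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_dev *dev = hctx->queue->queuedata;	/* hypothetical device */
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	/*
	 * Out of device resources: blk_mq_dispatch_rq_list() above
	 * requeues the request and stops dispatching for now.
	 */
	if (my_dev_full(dev))
		return BLK_STS_RESOURCE;

	/*
	 * Hard failure: any status other than BLK_STS_OK or
	 * BLK_STS_RESOURCE makes the core end the request with an
	 * error (the unlikely(ret != BLK_STS_OK) path above).
	 */
	if (my_dev_submit(dev, rq) < 0)
		return BLK_STS_IOERR;

	return BLK_STS_OK;
}

Returning the status code directly, rather than the old driver-private BLK_MQ_RQ_QUEUE_* values, is what lets __blk_mq_try_issue_directly() pass an arbitrary error status straight through to blk_mq_end_request(rq, ret) in the default: case above instead of flattening everything to BLK_STS_IOERR.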