@@ -557,6 +557,22 @@ static void __blk_mq_complete_request(struct request *rq)
 	put_cpu();
 }
 
+static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
+{
+	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+		rcu_read_unlock();
+	else
+		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
+}
+
+static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
+{
+	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+		rcu_read_lock();
+	else
+		*srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+}
+
 /**
  * blk_mq_complete_request - end I/O on a request
  * @rq:	the request being processed
@@ -1214,17 +1230,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	 */
 	WARN_ON_ONCE(in_interrupt());
 
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		rcu_read_lock();
-		blk_mq_sched_dispatch_requests(hctx);
-		rcu_read_unlock();
-	} else {
-		might_sleep();
+	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
-		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-		blk_mq_sched_dispatch_requests(hctx);
-		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-	}
+	hctx_lock(hctx, &srcu_idx);
+	blk_mq_sched_dispatch_requests(hctx);
+	hctx_unlock(hctx, srcu_idx);
 }
 
 /*
@@ -1296,17 +1306,10 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
 	 * quiesced.
 	 */
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		rcu_read_lock();
-		need_run = !blk_queue_quiesced(hctx->queue) &&
-			blk_mq_hctx_has_pending(hctx);
-		rcu_read_unlock();
-	} else {
-		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-		need_run = !blk_queue_quiesced(hctx->queue) &&
-			blk_mq_hctx_has_pending(hctx);
-		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-	}
+	hctx_lock(hctx, &srcu_idx);
+	need_run = !blk_queue_quiesced(hctx->queue) &&
+		blk_mq_hctx_has_pending(hctx);
+	hctx_unlock(hctx, srcu_idx);
 
 	if (need_run) {
 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
@@ -1618,7 +1621,7 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 					struct request *rq,
-					blk_qc_t *cookie, bool may_sleep)
+					blk_qc_t *cookie)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
@@ -1668,25 +1671,20 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	}
 
 insert:
-	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
+	blk_mq_sched_insert_request(rq, false, run_queue, false,
+					hctx->flags & BLK_MQ_F_BLOCKING);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, blk_qc_t *cookie)
 {
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		rcu_read_lock();
-		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
-		rcu_read_unlock();
-	} else {
-		unsigned int srcu_idx;
+	int srcu_idx;
 
-		might_sleep();
+	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
-		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
-		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
-		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
-	}
+	hctx_lock(hctx, &srcu_idx);
+	__blk_mq_try_issue_directly(hctx, rq, cookie);
+	hctx_unlock(hctx, srcu_idx);
 }
 
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
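Taken together, the hunks above replace each open-coded RCU/SRCU branch with one shared pattern. A minimal sketch of that pattern follows; it is illustrative only and not part of the patch, and the caller name example_run_dispatch is hypothetical:

/*
 * Illustrative sketch of the locking pattern the patch converges on.
 * A hctx marked BLK_MQ_F_BLOCKING may sleep in ->queue_rq, so it is
 * protected by SRCU; otherwise plain RCU suffices. With hctx_lock()/
 * hctx_unlock(), callers no longer open-code that distinction.
 */
static void example_run_dispatch(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/* Sleeping is only legal for BLK_MQ_F_BLOCKING hardware contexts. */
	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);	/* rcu_read_lock() or srcu_read_lock() */
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);	/* matching unlock of the same flavor */
}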