
Commit 055f6e1

Ming Lei authored and axboe committed
block: Make q_usage_counter also track legacy requests
This patch makes it possible to pause request allocation for the legacy
block layer by calling blk_mq_freeze_queue() and blk_mq_unfreeze_queue().

Signed-off-by: Ming Lei <ming.lei@redhat.com>
[ bvanassche: Combined two patches into one, edited a comment and made
  sure REQ_NOWAIT is handled properly in blk_old_get_request() ]
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Martin Steigerwald <martin@lichtvoll.de>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent eb619fd commit 055f6e1
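
Since the commit message describes the new capability in terms of an API,
here is a minimal caller sketch (not part of this commit; pause_for_update()
is a hypothetical helper) showing how a driver can now quiesce a legacy
(non-mq) queue with the same calls used for blk-mq queues:

        /* Hypothetical helper: pause request allocation, update queue
         * state, then resume. After this commit the freeze also covers
         * legacy requests, because they are tracked by q_usage_counter.
         */
        static void pause_for_update(struct request_queue *q)
        {
                blk_mq_freeze_queue(q);         /* drain q_usage_counter */
                /* ... queue state may be changed safely here ... */
                blk_mq_unfreeze_queue(q);       /* allow new requests again */
        }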

File tree

2 files changed: +14 -8 lines changed


block/blk-core.c

Lines changed: 12 additions & 0 deletions
@@ -612,6 +612,9 @@ void blk_set_queue_dying(struct request_queue *q)
                 }
                 spin_unlock_irq(q->queue_lock);
         }
+
+        /* Make blk_queue_enter() reexamine the DYING flag. */
+        wake_up_all(&q->mq_freeze_wq);
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
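
The wake_up_all() added here matters because blk_queue_enter() sleeps on
q->mq_freeze_wq while a queue is frozen; once the queue is marked DYING,
those sleepers must wake up and fail instead of waiting forever. A
simplified sketch of that wait loop, assuming the bool-nowait
blk_queue_enter() of this kernel series (the real function has more detail):

        int blk_queue_enter_sketch(struct request_queue *q, bool nowait)
        {
                while (true) {
                        /* Fast path: queue alive and not frozen. */
                        if (percpu_ref_tryget_live(&q->q_usage_counter))
                                return 0;
                        if (nowait)
                                return -EBUSY;  /* frozen, caller must not block */
                        /* Sleep until unfrozen or marked dying ... */
                        wait_event(q->mq_freeze_wq,
                                   !atomic_read(&q->mq_freeze_depth) ||
                                   blk_queue_dying(q));
                        /* ... which is exactly the wakeup added above. */
                        if (blk_queue_dying(q))
                                return -ENODEV;
                }
        }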

@@ -1398,16 +1401,22 @@ static struct request *blk_old_get_request(struct request_queue *q,
                                            unsigned int op, gfp_t gfp_mask)
 {
         struct request *rq;
+        int ret = 0;
 
         WARN_ON_ONCE(q->mq_ops);
 
         /* create ioc upfront */
         create_io_context(gfp_mask, q->node);
 
+        ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ||
+                              (op & REQ_NOWAIT));
+        if (ret)
+                return ERR_PTR(ret);
         spin_lock_irq(q->queue_lock);
         rq = get_request(q, op, NULL, gfp_mask);
         if (IS_ERR(rq)) {
                 spin_unlock_irq(q->queue_lock);
+                blk_queue_exit(q);
                 return rq;
         }
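
With blk_queue_enter() in the allocation path, a legacy request that must
not block now fails fast while the queue is frozen or dying. A hypothetical
caller sketch (the opcode and error values are illustrative, not taken from
this commit):

        struct request *rq;

        /*
         * Either GFP_ATOMIC (no __GFP_DIRECT_RECLAIM) or REQ_NOWAIT is
         * enough to put blk_queue_enter() into nowait mode.
         */
        rq = blk_get_request(q, REQ_OP_DRV_OUT | REQ_NOWAIT, GFP_ATOMIC);
        if (IS_ERR(rq))
                return PTR_ERR(rq);     /* e.g. -EBUSY while frozen, -ENODEV if dying */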

@@ -1579,6 +1588,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                 blk_free_request(rl, req);
                 freed_request(rl, sync, rq_flags);
                 blk_put_rl(rl);
+                blk_queue_exit(q);
         }
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
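
The blk_queue_exit() added above balances the blk_queue_enter() taken in
blk_old_get_request(), so every in-flight legacy request now pins
q->q_usage_counter for its whole lifetime. Assuming blk_queue_exit() is a
thin percpu_ref wrapper, the drop side is essentially:

        void blk_queue_exit_sketch(struct request_queue *q)
        {
                /*
                 * Drop the usage reference; once the queue is frozen, the
                 * release of the last reference completes the freeze.
                 */
                percpu_ref_put(&q->q_usage_counter);
        }
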
@@ -1860,8 +1870,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
          * Grab a free request. This is might sleep but can not fail.
          * Returns with the queue unlocked.
          */
+        blk_queue_enter_live(q);
         req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
         if (IS_ERR(req)) {
+                blk_queue_exit(q);
                 __wbt_done(q->rq_wb, wb_acct);
                 if (PTR_ERR(req) == -ENOMEM)
                         bio->bi_status = BLK_STS_RESOURCE;
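
Unlike blk_old_get_request(), the hunk above uses blk_queue_enter_live():
generic_make_request() has already entered the queue before calling
->make_request_fn, so this nested reference can be taken unconditionally.
A sketch, assuming the helper is a plain percpu_ref get:

        static inline void blk_queue_enter_live_sketch(struct request_queue *q)
        {
                /*
                 * The caller already holds a q_usage_counter reference, so
                 * the counter cannot hit zero here and a bare get is safe.
                 */
                percpu_ref_get(&q->q_usage_counter);
        }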

block/blk-mq.c

Lines changed: 2 additions & 8 deletions
@@ -126,7 +126,8 @@ void blk_freeze_queue_start(struct request_queue *q)
         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
         if (freeze_depth == 1) {
                 percpu_ref_kill(&q->q_usage_counter);
-                blk_mq_run_hw_queues(q, false);
+                if (q->mq_ops)
+                        blk_mq_run_hw_queues(q, false);
         }
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
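
The new q->mq_ops check is needed because blk_freeze_queue_start() now also
runs for legacy queues, which have no hardware queues to kick. For either
queue type, the freeze completes once the killed reference drains; the wait
side is essentially this sketch (cf. blk_mq_freeze_queue_wait()):

        void blk_mq_freeze_queue_wait_sketch(struct request_queue *q)
        {
                /*
                 * Every user, now including legacy requests, holds a
                 * q_usage_counter reference; freezing finishes when the
                 * last one is dropped.
                 */
                wait_event(q->mq_freeze_wq,
                           percpu_ref_is_zero(&q->q_usage_counter));
        }
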
@@ -256,13 +257,6 @@ void blk_mq_wake_waiters(struct request_queue *q)
         queue_for_each_hw_ctx(q, hctx, i)
                 if (blk_mq_hw_queue_mapped(hctx))
                         blk_mq_tag_wakeup_all(hctx->tags, true);
-
-        /*
-         * If we are called because the queue has now been marked as
-         * dying, we need to ensure that processes currently waiting on
-         * the queue are notified as well.
-         */
-        wake_up_all(&q->mq_freeze_wq);
 }
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
