Skip to content

Commit 6ce3dd6

Browse files
Ming Lei authored and Jens Axboe (axboe) committed
blk-mq: issue directly if hw queue isn't busy in case of 'none'
In case of 'none' io scheduler, when hw queue isn't busy, it isn't necessary to enqueue request to sw queue and dequeue it from sw queue because request may be submitted to hw queue asap without extra cost, meantime there shouldn't be many requests in sw queue, and we don't need to worry about effect on IO merge. There are still some single hw queue SCSI HBAs(HPSA, megaraid_sas, ...) which may connect high performance devices, so 'none' is often required for obtaining good performance. This patch improves IOPS and decreases CPU utilization on megaraid_sas, per Kashyap's test. Cc: Kashyap Desai <kashyap.desai@broadcom.com> Cc: Laurence Oberman <loberman@redhat.com> Cc: Omar Sandoval <osandov@fb.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Bart Van Assche <bart.vanassche@wdc.com> Cc: Hannes Reinecke <hare@suse.de> Reported-by: Kashyap Desai <kashyap.desai@broadcom.com> Tested-by: Kashyap Desai <kashyap.desai@broadcom.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 71e9690 commit 6ce3dd6

File tree

3 files changed

+36
-2
lines changed

3 files changed

+36
-2
lines changed

block/blk-mq-sched.c

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -405,8 +405,19 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
405405

406406
if (e && e->type->ops.mq.insert_requests)
407407
e->type->ops.mq.insert_requests(hctx, list, false);
408-
else
408+
else {
409+
/*
410+
* try to issue requests directly if the hw queue isn't
411+
* busy in case of 'none' scheduler, and this way may save
412+
* us one extra enqueue & dequeue to sw queue.
413+
*/
414+
if (!hctx->dispatch_busy && !e && !run_queue_async) {
415+
blk_mq_try_issue_list_directly(hctx, list);
416+
if (list_empty(list))
417+
return;
418+
}
409419
blk_mq_insert_requests(hctx, ctx, list);
420+
}
410421

411422
blk_mq_run_hw_queue(hctx, run_queue_async);
412423
}

block/blk-mq.c

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1691,13 +1691,16 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
16911691
ret = q->mq_ops->queue_rq(hctx, &bd);
16921692
switch (ret) {
16931693
case BLK_STS_OK:
1694+
blk_mq_update_dispatch_busy(hctx, false);
16941695
*cookie = new_cookie;
16951696
break;
16961697
case BLK_STS_RESOURCE:
16971698
case BLK_STS_DEV_RESOURCE:
1699+
blk_mq_update_dispatch_busy(hctx, true);
16981700
__blk_mq_requeue_request(rq);
16991701
break;
17001702
default:
1703+
blk_mq_update_dispatch_busy(hctx, false);
17011704
*cookie = BLK_QC_T_NONE;
17021705
break;
17031706
}
@@ -1780,6 +1783,23 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq)
17801783
return ret;
17811784
}
17821785

1786+
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
1787+
struct list_head *list)
1788+
{
1789+
while (!list_empty(list)) {
1790+
blk_status_t ret;
1791+
struct request *rq = list_first_entry(list, struct request,
1792+
queuelist);
1793+
1794+
list_del_init(&rq->queuelist);
1795+
ret = blk_mq_request_issue_directly(rq);
1796+
if (ret != BLK_STS_OK) {
1797+
list_add(&rq->queuelist, list);
1798+
break;
1799+
}
1800+
}
1801+
}
1802+
17831803
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
17841804
{
17851805
const int is_sync = op_is_sync(bio->bi_opf);
@@ -1880,7 +1900,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
18801900
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
18811901
&cookie);
18821902
}
1883-
} else if (q->nr_hw_queues > 1 && is_sync) {
1903+
} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
1904+
!data.hctx->dispatch_busy)) {
18841905
blk_mq_put_ctx(data.ctx);
18851906
blk_mq_bio_to_request(rq, bio);
18861907
blk_mq_try_issue_directly(data.hctx, rq, &cookie);

block/blk-mq.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,8 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
6464

6565
/* Used by blk_insert_cloned_request() to issue request directly */
6666
blk_status_t blk_mq_request_issue_directly(struct request *rq);
67+
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
68+
struct list_head *list);
6769

6870
/*
6971
* CPU -> queue mappings

0 commit comments

Comments
 (0)