
Commit 7f556a4

Jianchao Wang authored and axboe committed
blk-mq: refactor the code of issue request directly
Merge blk_mq_try_issue_directly and __blk_mq_try_issue_directly into
one interface to unify the interfaces to issue requests directly. The
merged interface takes over the request entirely: based on the return
value of .queue_rq and the 'bypass' parameter, it either inserts the
request, ends it, or does nothing. The caller then needs no further
handling, and the call sites can be cleaned up.

Also, commit c616cbe ("blk-mq: punt failed direct issue to dispatch
list") inserts requests to the hctx dispatch list whenever it gets a
BLK_STS_RESOURCE or BLK_STS_DEV_RESOURCE. That is overkill and harms
merging; we only need to do it for requests that have been through
.queue_rq. This patch fixes that as well.

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
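The sketch below is a minimal user-space model, not kernel code, of the
contract described above. The enum, the decide() helper, and the puts()
placeholders are hypothetical stand-ins for blk_status_t, the merged
blk_mq_try_issue_directly(), and the insert/end paths; they only
illustrate how 'bypass' and "the request has been through .queue_rq"
(modeled as 'force') select between inserting, ending, and returning
the status to the caller.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for blk_status_t. */
enum sts { STS_OK, STS_RESOURCE, STS_DEV_RESOURCE, STS_IOERR };

/*
 * Model of the post-issue handling in the merged interface.
 * 'force' models "the request has been through .queue_rq".
 */
static enum sts decide(enum sts ret, bool bypass, bool force)
{
	switch (ret) {
	case STS_OK:
		break;
	case STS_RESOURCE:
	case STS_DEV_RESOURCE:
		if (force) {
			puts("  punt to hctx dispatch list");
			/* bypass callers (DM) must see OK to avoid livelock */
			ret = bypass ? STS_OK : ret;
		} else if (!bypass) {
			puts("  insert via scheduler");
		}
		break;
	default:
		if (!bypass)
			puts("  end request with error status");
		break;
	}
	return ret;
}

int main(void)
{
	puts("bypass=false, no budget/tag (never reached .queue_rq):");
	decide(STS_RESOURCE, false, false);

	puts("bypass=true, .queue_rq returned STS_RESOURCE:");
	decide(STS_RESOURCE, true, true);
	return 0;
}

With bypass=true (the blk_mq_request_issue_directly() path), a resource
failure after .queue_rq is still punted to the dispatch list but
reported as OK so DM does not livelock; with bypass=false, the real
status is handled internally and the caller need do nothing.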
1 parent 4c9770c · commit 7f556a4

File tree

1 file changed: +54, -49 lines


block/blk-mq.c

Lines changed: 54 additions & 49 deletions
@@ -1792,78 +1792,83 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
 	return ret;
 }
 
-static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 						struct request *rq,
 						blk_qc_t *cookie,
-						bool bypass_insert, bool last)
+						bool bypass, bool last)
 {
 	struct request_queue *q = rq->q;
 	bool run_queue = true;
+	blk_status_t ret = BLK_STS_RESOURCE;
+	int srcu_idx;
+	bool force = false;
 
+	hctx_lock(hctx, &srcu_idx);
 	/*
-	 * RCU or SRCU read lock is needed before checking quiesced flag.
+	 * hctx_lock is needed before checking quiesced flag.
 	 *
-	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
-	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
-	 * and avoid driver to try to dispatch again.
+	 * When queue is stopped or quiesced, ignore 'bypass', insert
+	 * and return BLK_STS_OK to caller, and avoid driver to try to
+	 * dispatch again.
 	 */
-	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
+	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
 		run_queue = false;
-		bypass_insert = false;
-		goto insert;
+		bypass = false;
+		goto out_unlock;
 	}
 
-	if (q->elevator && !bypass_insert)
-		goto insert;
+	if (unlikely(q->elevator && !bypass))
+		goto out_unlock;
 
 	if (!blk_mq_get_dispatch_budget(hctx))
-		goto insert;
+		goto out_unlock;
 
 	if (!blk_mq_get_driver_tag(rq)) {
 		blk_mq_put_dispatch_budget(hctx);
-		goto insert;
+		goto out_unlock;
 	}
 
-	return __blk_mq_issue_directly(hctx, rq, cookie, last);
-insert:
-	if (bypass_insert)
-		return BLK_STS_RESOURCE;
-
-	blk_mq_request_bypass_insert(rq, run_queue);
-	return BLK_STS_OK;
-}
-
-static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-		struct request *rq, blk_qc_t *cookie)
-{
-	blk_status_t ret;
-	int srcu_idx;
-
-	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
-
-	hctx_lock(hctx, &srcu_idx);
-
-	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
-	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-		blk_mq_request_bypass_insert(rq, true);
-	else if (ret != BLK_STS_OK)
-		blk_mq_end_request(rq, ret);
-
+	/*
+	 * Always add a request that has been through
+	 * .queue_rq() to the hardware dispatch list.
+	 */
+	force = true;
+	ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
+out_unlock:
 	hctx_unlock(hctx, srcu_idx);
+	switch (ret) {
+	case BLK_STS_OK:
+		break;
+	case BLK_STS_DEV_RESOURCE:
+	case BLK_STS_RESOURCE:
+		if (force) {
+			blk_mq_request_bypass_insert(rq, run_queue);
+			/*
+			 * We have to return BLK_STS_OK for the DM
+			 * to avoid livelock. Otherwise, we return
+			 * the real result to indicate whether the
+			 * request is direct-issued successfully.
+			 */
+			ret = bypass ? BLK_STS_OK : ret;
+		} else if (!bypass) {
+			blk_mq_sched_insert_request(rq, false,
+						run_queue, false);
+		}
+		break;
+	default:
+		if (!bypass)
+			blk_mq_end_request(rq, ret);
+		break;
+	}
+
+	return ret;
 }
 
 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 {
-	blk_status_t ret;
-	int srcu_idx;
-	blk_qc_t unused_cookie;
-	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+	blk_qc_t unused;
 
-	hctx_lock(hctx, &srcu_idx);
-	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
-	hctx_unlock(hctx, srcu_idx);
-
-	return ret;
+	return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, last);
 }
 
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
@@ -2004,13 +2009,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		if (same_queue_rq) {
 			data.hctx = same_queue_rq->mq_hctx;
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-					&cookie);
+					&cookie, false, true);
 		}
 	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
 			!data.hctx->dispatch_busy)) {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+		blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
 	} else {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
