
Commit 1e8e55b

ahunter6 authored and storulf committed
mmc: block: Add CQE support
Add CQE support to the block driver, including:
- optionally using DCMD for flush requests
- "manually" issuing discard requests
- issuing read / write requests to the CQE
- supporting block-layer timeouts
- handling recovery
- supporting re-tuning

CQE offers 25% - 50% better random multi-threaded I/O. There is a slight (e.g. 2%) drop in sequential read speed but no observable change to sequential write.

CQE automatically sends the commands needed to complete requests. However, it only supports reads / writes and so-called "direct commands" (DCMD). Furthermore, DCMD is limited to one command at a time, whereas discards require 3 commands. That makes issuing discards through CQE very awkward, and some CQEs do not support DCMD anyway. So for discards, the existing non-CQE approach is kept: the mmc core code issues the 3 commands one at a time, i.e. mmc_erase(). DCMD is used only for issuing flushes.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
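The dispatch policy described above (flushes as DCMD where the CQE supports it, discards kept synchronous on the legacy path, reads / writes queued to the CQE) is decided per request before mmc_blk_mq_issue_rq() runs. The helper that makes that decision lives in queue.c, which is part of this commit but not shown in this excerpt; the following is a hedged sketch of what such a classification helper could look like, not a verbatim copy of the patch:

/*
 * Hedged sketch only: the real classification helpers live in
 * drivers/mmc/core/queue.c (changed by this commit but not shown in
 * this excerpt).  The function name and details here are illustrative
 * assumptions, not the patch itself.
 */
static enum mmc_issue_type example_cqe_issue_type(struct mmc_host *host,
						  struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		/* Discards need 3 commands, so issue them synchronously. */
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		/* Flush becomes a DCMD only if the CQE supports DCMD. */
		return (host->caps2 & MMC_CAP2_CQE_DCMD) ?
			MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		/* Reads / writes go to the CQE asynchronously. */
		return MMC_ISSUE_ASYNC;
	}
}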
1 parent 8119697 commit 1e8e55b


4 files changed: +326 −6 lines changed

drivers/mmc/core/block.c

Lines changed: 148 additions & 2 deletions
@@ -112,6 +112,7 @@ struct mmc_blk_data {
 #define MMC_BLK_WRITE		BIT(1)
 #define MMC_BLK_DISCARD		BIT(2)
 #define MMC_BLK_SECDISCARD	BIT(3)
+#define MMC_BLK_CQE_RECOVERY	BIT(4)
 
 /*
  * Only set in main mmc_blk_data associated
@@ -1730,6 +1731,138 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
 	*do_data_tag_p = do_data_tag;
 }
 
+#define MMC_CQE_RETRIES 2
+
+static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+	struct mmc_request *mrq = &mqrq->brq.mrq;
+	struct request_queue *q = req->q;
+	struct mmc_host *host = mq->card->host;
+	unsigned long flags;
+	bool put_card;
+	int err;
+
+	mmc_cqe_post_req(host, mrq);
+
+	if (mrq->cmd && mrq->cmd->error)
+		err = mrq->cmd->error;
+	else if (mrq->data && mrq->data->error)
+		err = mrq->data->error;
+	else
+		err = 0;
+
+	if (err) {
+		if (mqrq->retries++ < MMC_CQE_RETRIES)
+			blk_mq_requeue_request(req, true);
+		else
+			blk_mq_end_request(req, BLK_STS_IOERR);
+	} else if (mrq->data) {
+		if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
+			blk_mq_requeue_request(req, true);
+		else
+			__blk_mq_end_request(req, BLK_STS_OK);
+	} else {
+		blk_mq_end_request(req, BLK_STS_OK);
+	}
+
+	spin_lock_irqsave(q->queue_lock, flags);
+
+	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+	put_card = (mmc_tot_in_flight(mq) == 0);
+
+	mmc_cqe_check_busy(mq);
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	if (!mq->cqe_busy)
+		blk_mq_run_hw_queues(q, true);
+
+	if (put_card)
+		mmc_put_card(mq->card, &mq->ctx);
+}
+
+void mmc_blk_cqe_recovery(struct mmc_queue *mq)
+{
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
+	int err;
+
+	pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
+
+	err = mmc_cqe_recovery(host);
+	if (err)
+		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+	else
+		mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+	pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+}
+
+static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
+{
+	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+						  brq.mrq);
+	struct request *req = mmc_queue_req_to_req(mqrq);
+	struct request_queue *q = req->q;
+	struct mmc_queue *mq = q->queuedata;
+
+	/*
+	 * Block layer timeouts race with completions which means the normal
+	 * completion path cannot be used during recovery.
+	 */
+	if (mq->in_recovery)
+		mmc_blk_cqe_complete_rq(mq, req);
+	else
+		blk_mq_complete_request(req);
+}
+
+static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+	mrq->done = mmc_blk_cqe_req_done;
+	mrq->recovery_notifier = mmc_cqe_recovery_notifier;
+
+	return mmc_cqe_start_req(host, mrq);
+}
+
+static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
+						 struct request *req)
+{
+	struct mmc_blk_request *brq = &mqrq->brq;
+
+	memset(brq, 0, sizeof(*brq));
+
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.tag = req->tag;
+
+	return &brq->mrq;
+}
+
+static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
+
+	mrq->cmd->opcode = MMC_SWITCH;
+	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+			(EXT_CSD_FLUSH_CACHE << 16) |
+			(1 << 8) |
+			EXT_CSD_CMD_SET_NORMAL;
+	mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
+
+	return mmc_blk_cqe_start_req(mq->card->host, mrq);
+}
+
+static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
+
+	return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+}
+
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
 			       int disable_multi,
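The flush DCMD above hand-builds a CMD6 (SWITCH) argument rather than going through the usual synchronous mmc_switch() helper, because the command is handed straight to the CQE as a direct command. As a reading aid, a hedged sketch of how that 32-bit argument decomposes (the MMC_SWITCH_ARG macro is hypothetical and not part of this patch):

/*
 * Hypothetical helper, for illustration only: the CMD6 argument layout
 * that mmc_blk_cqe_issue_flush() encodes by hand above.
 */
#define MMC_SWITCH_ARG(mode, index, value, cmd_set)	\
	(((mode) << 24) | ((index) << 16) | ((value) << 8) | (cmd_set))

/*
 * The flush DCMD argument is therefore equivalent to:
 *
 *   MMC_SWITCH_ARG(MMC_SWITCH_MODE_WRITE_BYTE, EXT_CSD_FLUSH_CACHE,
 *                  1, EXT_CSD_CMD_SET_NORMAL)
 *
 * i.e. "write 1 to the FLUSH_CACHE byte of the EXT_CSD register".
 */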
@@ -2038,7 +2171,10 @@ void mmc_blk_mq_complete(struct request *req)
 {
 	struct mmc_queue *mq = req->q->queuedata;
 
-	mmc_blk_mq_complete_rq(mq, req);
+	if (mq->use_cqe)
+		mmc_blk_cqe_complete_rq(mq, req);
+	else
+		mmc_blk_mq_complete_rq(mq, req);
 }
 
 static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
@@ -2212,6 +2348,9 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
 
 static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
 {
+	if (mq->use_cqe)
+		return host->cqe_ops->cqe_wait_for_idle(host);
+
 	return mmc_blk_rw_wait(mq, NULL);
 }

@@ -2250,11 +2389,18 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
 			return MMC_REQ_FAILED_TO_START;
 		}
 		return MMC_REQ_FINISHED;
+	case MMC_ISSUE_DCMD:
 	case MMC_ISSUE_ASYNC:
 		switch (req_op(req)) {
+		case REQ_OP_FLUSH:
+			ret = mmc_blk_cqe_issue_flush(mq, req);
+			break;
 		case REQ_OP_READ:
 		case REQ_OP_WRITE:
-			ret = mmc_blk_mq_issue_rw_rq(mq, req);
+			if (mq->use_cqe)
+				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+			else
+				ret = mmc_blk_mq_issue_rw_rq(mq, req);
 			break;
 		default:
 			WARN_ON_ONCE(1);

drivers/mmc/core/block.h

Lines changed: 2 additions & 0 deletions
@@ -7,6 +7,8 @@ struct request;
 
 void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
 
+void mmc_blk_cqe_recovery(struct mmc_queue *mq);
+
 enum mmc_issued;
 
 enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
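block.h exports mmc_blk_cqe_recovery() so that the queue code (queue.c is also touched by this commit, but its diff is not shown in this excerpt) can call it from its recovery path. Below is a hedged sketch of such a caller, under the assumption, implied by the comment in mmc_blk_cqe_req_done(), that mq->in_recovery is set around recovery so completions bypass the normal blk-mq completion path; everything here other than mmc_blk_cqe_recovery() itself is illustrative:

/*
 * Hedged sketch of a recovery-work handler on the queue side; only
 * mmc_blk_cqe_recovery() is confirmed by this diff, the rest is an
 * illustrative assumption.
 */
static void example_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;

	mmc_get_card(mq->card, &mq->ctx);

	/* See the comment in mmc_blk_cqe_req_done(): completions during
	 * recovery must take the direct path, not blk_mq_complete_request().
	 */
	mq->in_recovery = true;

	mmc_blk_cqe_recovery(mq);

	mq->in_recovery = false;

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}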
