Commit 72a5af5

ahunter6 authored and storulf committed
mmc: core: Add support for handling CQE requests
Add core support for handling CQE requests, including starting, completing
and recovering.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
1 parent 6c0cedd commit 72a5af5

File tree

3 files changed: +164 -5 lines changed


drivers/mmc/core/core.c

Lines changed: 158 additions & 5 deletions
@@ -266,7 +266,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
         host->ops->request(host, mrq);
 }
 
-static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
+static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
+                             bool cqe)
 {
         if (mrq->sbc) {
                 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
@@ -275,9 +276,12 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
         }
 
         if (mrq->cmd) {
-                pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
-                         mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
-                         mrq->cmd->flags);
+                pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
+                         mmc_hostname(host), cqe ? "CQE direct " : "",
+                         mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+        } else if (cqe) {
+                pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
+                         mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
         }
 
         if (mrq->data) {
@@ -342,7 +346,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
         if (mmc_card_removed(host->card))
                 return -ENOMEDIUM;
 
-        mmc_mrq_pr_debug(host, mrq);
+        mmc_mrq_pr_debug(host, mrq, false);
 
         WARN_ON(!host->claimed);
 
@@ -482,6 +486,155 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 }
 EXPORT_SYMBOL(mmc_wait_for_req_done);
 
+/*
+ * mmc_cqe_start_req - Start a CQE request.
+ * @host: MMC host to start the request
+ * @mrq: request to start
+ *
+ * Start the request, re-tuning if needed and it is possible. Returns an error
+ * code if the request fails to start or -EBUSY if CQE is busy.
+ */
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+        int err;
+
+        /*
+         * CQE cannot process re-tuning commands. Caller must hold retuning
+         * while CQE is in use. Re-tuning can happen here only when CQE has no
+         * active requests i.e. this is the first. Note, re-tuning will call
+         * ->cqe_off().
+         */
+        err = mmc_retune(host);
+        if (err)
+                goto out_err;
+
+        mrq->host = host;
+
+        mmc_mrq_pr_debug(host, mrq, true);
+
+        err = mmc_mrq_prep(host, mrq);
+        if (err)
+                goto out_err;
+
+        err = host->cqe_ops->cqe_request(host, mrq);
+        if (err)
+                goto out_err;
+
+        trace_mmc_request_start(host, mrq);
+
+        return 0;
+
+out_err:
+        if (mrq->cmd) {
+                pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
+                         mmc_hostname(host), mrq->cmd->opcode, err);
+        } else {
+                pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
+                         mmc_hostname(host), mrq->tag, err);
+        }
+        return err;
+}
+EXPORT_SYMBOL(mmc_cqe_start_req);
+
+/**
+ * mmc_cqe_request_done - CQE has finished processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
+ *
+ * CQE drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+        mmc_should_fail_request(host, mrq);
+
+        /* Flag re-tuning needed on CRC errors */
+        if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
+            (mrq->data && mrq->data->error == -EILSEQ))
+                mmc_retune_needed(host);
+
+        trace_mmc_request_done(host, mrq);
+
+        if (mrq->cmd) {
+                pr_debug("%s: CQE req done (direct CMD%u): %d\n",
+                         mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
+        } else {
+                pr_debug("%s: CQE transfer done tag %d\n",
+                         mmc_hostname(host), mrq->tag);
+        }
+
+        if (mrq->data) {
+                pr_debug("%s: %d bytes transferred: %d\n",
+                         mmc_hostname(host),
+                         mrq->data->bytes_xfered, mrq->data->error);
+        }
+
+        mrq->done(mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_request_done);
+
+/**
+ * mmc_cqe_post_req - CQE post process of a completed MMC request
+ * @host: MMC host
+ * @mrq: MMC request to be processed
+ */
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+        if (host->cqe_ops->cqe_post_req)
+                host->cqe_ops->cqe_post_req(host, mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_post_req);
+
+/* Arbitrary 1 second timeout */
+#define MMC_CQE_RECOVERY_TIMEOUT        1000
+
+/*
+ * mmc_cqe_recovery - Recover from CQE errors.
+ * @host: MMC host to recover
+ *
+ * Recovery consists of stopping CQE, stopping eMMC, discarding the queue in
+ * in eMMC, and discarding the queue in CQE. CQE must call
+ * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
+ * fails to discard its queue.
+ */
+int mmc_cqe_recovery(struct mmc_host *host)
+{
+        struct mmc_command cmd;
+        int err;
+
+        mmc_retune_hold_now(host);
+
+        /*
+         * Recovery is expected seldom, if at all, but it reduces performance,
+         * so make sure it is not completely silent.
+         */
+        pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
+
+        host->cqe_ops->cqe_recovery_start(host);
+
+        memset(&cmd, 0, sizeof(cmd));
+        cmd.opcode = MMC_STOP_TRANSMISSION,
+        cmd.flags = MMC_RSP_R1B | MMC_CMD_AC,
+        cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+        cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+        mmc_wait_for_cmd(host, &cmd, 0);
+
+        memset(&cmd, 0, sizeof(cmd));
+        cmd.opcode = MMC_CMDQ_TASK_MGMT;
+        cmd.arg = 1; /* Discard entire queue */
+        cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+        cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+        cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+        err = mmc_wait_for_cmd(host, &cmd, 0);
+
+        host->cqe_ops->cqe_recovery_finish(host);
+
+        mmc_retune_release(host);
+
+        return err;
+}
+EXPORT_SYMBOL(mmc_cqe_recovery);
+
 /**
  * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
  * @host: MMC host
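
For orientation, here is a minimal sketch of the issuer-side flow these new exports enable. It is not part of the commit: the example_cqe_* names and the request setup are assumptions, and such an issuer would live in drivers/mmc/core/ where the private core.h declarations (next file) are visible. Only the behaviour of mmc_cqe_start_req(), mmc_cqe_post_req() and the mrq->done() callback is taken from the code above.

/* Illustrative sketch only: issuer-side use of the new CQE request API.
 * Assumes mrq has already been prepared (tag, data->blk_addr, done, etc.). */

static void example_cqe_done(struct mmc_request *mrq)
{
        struct mmc_host *host = mrq->host;      /* set by mmc_cqe_start_req() */

        /* Reached via mmc_cqe_request_done() -> mrq->done() */
        mmc_cqe_post_req(host, mrq);            /* optional host post-processing */
}

static int example_cqe_issue(struct mmc_host *host, struct mmc_request *mrq)
{
        int err;

        mrq->done = example_cqe_done;

        err = mmc_cqe_start_req(host, mrq);
        if (err)
                return err;     /* -EBUSY means CQE is busy; retry later */

        return 0;
}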

drivers/mmc/core/core.h

Lines changed: 4 additions & 0 deletions
@@ -145,4 +145,8 @@ static inline void mmc_claim_host(struct mmc_host *host)
         __mmc_claim_host(host, NULL, NULL);
 }
 
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_cqe_recovery(struct mmc_host *host);
+
 #endif
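
A hedged sketch of how an issuer using these declarations might invoke recovery after a failed CQE transfer; the trigger condition and the example_cqe_error() name are assumptions, only mmc_cqe_recovery()'s behaviour comes from the patch above.

/* Illustrative sketch only: error handling built on mmc_cqe_recovery(). */
static void example_cqe_error(struct mmc_host *host, struct mmc_request *mrq)
{
        int err;

        if (!(mrq->data && mrq->data->error))
                return;                         /* nothing to recover from */

        /* Stops CQE, sends CMD12 then CMD48 (discard queue), restarts CQE */
        err = mmc_cqe_recovery(host);
        if (err)
                pr_err("%s: CQE recovery failed: %d\n",
                       mmc_hostname(host), err);

        /*
         * By this point the CQE host driver has completed all outstanding
         * requests via mmc_cqe_request_done(); the issuer may requeue them.
         */
}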

include/linux/mmc/host.h

Lines changed: 2 additions & 0 deletions
@@ -474,6 +474,8 @@ void mmc_detect_change(struct mmc_host *, unsigned long delay);
 void mmc_request_done(struct mmc_host *, struct mmc_request *);
 void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
 
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);
+
 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
 {
         host->ops->enable_sdio_irq(host, 0);
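
For completeness, a rough sketch of the driver-side counterpart this header change enables: a CQE host driver handing a finished task back to the core. The my_cqe_finish_task() name and the way the outcome is filled in are assumptions, not taken from any real CQE driver.

/* Illustrative sketch only: CQE host-driver completion path. */
static void my_cqe_finish_task(struct mmc_host *host, struct mmc_request *mrq)
{
        if (mrq->data) {
                /* Report the outcome before returning the request to the core */
                mrq->data->error = 0;
                mrq->data->bytes_xfered = mrq->data->blksz * mrq->data->blocks;
        }

        /* The core logs, flags re-tuning on -EILSEQ and calls mrq->done() */
        mmc_cqe_request_done(host, mrq);
}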
