@@ -112,6 +112,7 @@ struct mmc_blk_data {
 #define MMC_BLK_WRITE		BIT(1)
 #define MMC_BLK_DISCARD		BIT(2)
 #define MMC_BLK_SECDISCARD	BIT(3)
+#define MMC_BLK_CQE_RECOVERY	BIT(4)
 
 /*
  * Only set in main mmc_blk_data associated
@@ -1730,6 +1731,138 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
 		*do_data_tag_p = do_data_tag;
 }
 
+#define MMC_CQE_RETRIES 2
+
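+/*
+ * Complete a CQE request: propagate any command or data error, requeue the
+ * request while retries remain (MMC_CQE_RETRIES), then drop the in-flight
+ * count and release the card once nothing else is queued.
+ */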
+static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+	struct mmc_request *mrq = &mqrq->brq.mrq;
+	struct request_queue *q = req->q;
+	struct mmc_host *host = mq->card->host;
+	unsigned long flags;
+	bool put_card;
+	int err;
+
+	mmc_cqe_post_req(host, mrq);
+
+	if (mrq->cmd && mrq->cmd->error)
+		err = mrq->cmd->error;
+	else if (mrq->data && mrq->data->error)
+		err = mrq->data->error;
+	else
+		err = 0;
+
+	if (err) {
+		if (mqrq->retries++ < MMC_CQE_RETRIES)
+			blk_mq_requeue_request(req, true);
+		else
+			blk_mq_end_request(req, BLK_STS_IOERR);
+	} else if (mrq->data) {
+		if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
+			blk_mq_requeue_request(req, true);
+		else
+			__blk_mq_end_request(req, BLK_STS_OK);
+	} else {
+		blk_mq_end_request(req, BLK_STS_OK);
+	}
+
+	spin_lock_irqsave(q->queue_lock, flags);
+
+	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+	put_card = (mmc_tot_in_flight(mq) == 0);
+
+	mmc_cqe_check_busy(mq);
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	if (!mq->cqe_busy)
+		blk_mq_run_hw_queues(q, true);
+
+	if (put_card)
+		mmc_put_card(mq->card, &mq->ctx);
+}
+
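+/*
+ * Run the host's CQE recovery sequence and record the result against the
+ * MMC_BLK_CQE_RECOVERY reset type.
+ */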
+void mmc_blk_cqe_recovery(struct mmc_queue *mq)
+{
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
+	int err;
+
+	pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
+
+	err = mmc_cqe_recovery(host);
+	if (err)
+		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+	else
+		mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+	pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+}
+
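+/* Completion callback invoked by the host controller for a CQE request */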
+static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
+{
+	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+						  brq.mrq);
+	struct request *req = mmc_queue_req_to_req(mqrq);
+	struct request_queue *q = req->q;
+	struct mmc_queue *mq = q->queuedata;
+
+	/*
+	 * Block layer timeouts race with completions which means the normal
+	 * completion path cannot be used during recovery.
+	 */
+	if (mq->in_recovery)
+		mmc_blk_cqe_complete_rq(mq, req);
+	else
+		blk_mq_complete_request(req);
+}
+
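+/* Hook up the completion and recovery callbacks, then start the request */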
+static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+	mrq->done		= mmc_blk_cqe_req_done;
+	mrq->recovery_notifier	= mmc_cqe_recovery_notifier;
+
+	return mmc_cqe_start_req(host, mrq);
+}
+
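+/*
+ * Prepare a direct command (DCMD) request, i.e. a bare command issued
+ * through the command queue engine with no data transfer.
+ */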
+static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
+						 struct request *req)
+{
+	struct mmc_blk_request *brq = &mqrq->brq;
+
+	memset(brq, 0, sizeof(*brq));
+
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.tag = req->tag;
+
+	return &brq->mrq;
+}
+
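+/*
+ * Issue a cache flush as a DCMD: a SWITCH command writing 1 to the
+ * EXT_CSD FLUSH_CACHE byte.
+ */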
+static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
+
+	mrq->cmd->opcode = MMC_SWITCH;
+	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+			(EXT_CSD_FLUSH_CACHE << 16) |
+			(1 << 8) |
+			EXT_CSD_CMD_SET_NORMAL;
+	mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
+
+	return mmc_blk_cqe_start_req(mq->card->host, mrq);
+}
+
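+/* Issue a read or write request through the command queue */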
+static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
+
+	return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+}
+
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
 			       int disable_multi,
@@ -2038,7 +2171,10 @@ void mmc_blk_mq_complete(struct request *req)
 {
 	struct mmc_queue *mq = req->q->queuedata;
 
-	mmc_blk_mq_complete_rq(mq, req);
+	if (mq->use_cqe)
+		mmc_blk_cqe_complete_rq(mq, req);
+	else
+		mmc_blk_mq_complete_rq(mq, req);
 }
 
 static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
@@ -2212,6 +2348,9 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
 
 static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
 {
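+	/* For CQE, let the host controller driver wait for the queue to idle */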
+	if (mq->use_cqe)
+		return host->cqe_ops->cqe_wait_for_idle(host);
+
 	return mmc_blk_rw_wait(mq, NULL);
 }
 
@@ -2250,11 +2389,18 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
 			return MMC_REQ_FAILED_TO_START;
 		}
 		return MMC_REQ_FINISHED;
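+	/* DCMD requests (here, cache flush) share the async issue path below */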
+	case MMC_ISSUE_DCMD:
 	case MMC_ISSUE_ASYNC:
 		switch (req_op(req)) {
+		case REQ_OP_FLUSH:
+			ret = mmc_blk_cqe_issue_flush(mq, req);
+			break;
 		case REQ_OP_READ:
 		case REQ_OP_WRITE:
-			ret = mmc_blk_mq_issue_rw_rq(mq, req);
+			if (mq->use_cqe)
+				ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+			else
+				ret = mmc_blk_mq_issue_rw_rq(mq, req);
 			break;
 		default:
 			WARN_ON_ONCE(1);