
Commit 6f20070

Abhishek Sahu authored and Miquel Raynal committed
mtd: rawnand: qcom: wait for desc completion in all BAM channels
The BAM has 3 channels: tx, rx and command. The command channel is used for register reads/writes, the tx channel for data writes and the rx channel for data reads. Currently, the driver assumes the transfer is complete once all the command descriptors have completed.

Sometimes there is a race between data channel (tx/rx) completion and command channel completion. In those cases, the data present in the buffer is not valid during the small window between command descriptor completion and data descriptor completion.

This patch signals NAND transfer completion only when both the data and command DMA channels have completed all of their DMA descriptors. It assigns a completion callback to the last DMA descriptor of each channel and waits for both to complete.

Fixes: 8d6b6d7 ("mtd: nand: qcom: support for command descriptor formation")
Cc: stable@vger.kernel.org
Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
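The core of the change is a single DMA-completion callback shared by the command and data channels, which signals the transaction only once the last descriptor of every armed channel has retired. A minimal sketch of that pattern, using hypothetical demo_* names in place of the driver's bam_transaction fields (see the diff below for the real code):

#include <linux/completion.h>

struct demo_txn {
	/* set before issuing when a data (tx/rx) descriptor is also queued */
	bool wait_second_completion;
	struct completion txn_done;
};

/* Shared callback, attached to the last descriptor of each channel */
static void demo_dma_done(void *data)
{
	struct demo_txn *txn = data;

	if (txn->wait_second_completion)
		txn->wait_second_completion = false;	/* first channel finished */
	else
		complete(&txn->txn_done);		/* last channel finished */
}

When only command descriptors are queued, wait_second_completion stays false and the first callback completes txn_done immediately; when both channels are armed, the waiter wakes only on the second callback.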
1 parent 7ddb937 commit 6f20070

File tree

1 file changed: +52 -1 lines changed

drivers/mtd/nand/raw/qcom_nandc.c

Lines changed: 52 additions & 1 deletion
@@ -213,6 +213,8 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
 #define QPIC_PER_CW_CMD_SGL		32
 #define QPIC_PER_CW_DATA_SGL		8
 
+#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)
+
 /*
  * Flags used in DMA descriptor preparation helper functions
  * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
@@ -245,6 +247,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
  * @tx_sgl_start - start index in data sgl for tx.
  * @rx_sgl_pos - current index in data sgl for rx.
  * @rx_sgl_start - start index in data sgl for rx.
+ * @wait_second_completion - wait for second DMA desc completion before making
+ *                           the NAND transfer completion.
+ * @txn_done - completion for NAND transfer.
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
  */
 struct bam_transaction {
 	struct bam_cmd_element *bam_ce;
@@ -258,6 +265,10 @@ struct bam_transaction {
 	u32 tx_sgl_start;
 	u32 rx_sgl_pos;
 	u32 rx_sgl_start;
+	bool wait_second_completion;
+	struct completion txn_done;
+	struct dma_async_tx_descriptor *last_data_desc;
+	struct dma_async_tx_descriptor *last_cmd_desc;
 };
 
 /*
@@ -504,6 +515,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
 
 	bam_txn->data_sgl = bam_txn_buf;
 
+	init_completion(&bam_txn->txn_done);
+
 	return bam_txn;
 }
 
@@ -523,11 +536,33 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
 	bam_txn->tx_sgl_start = 0;
 	bam_txn->rx_sgl_pos = 0;
 	bam_txn->rx_sgl_start = 0;
+	bam_txn->last_data_desc = NULL;
+	bam_txn->wait_second_completion = false;
 
 	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
 		      QPIC_PER_CW_CMD_SGL);
 	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
 		      QPIC_PER_CW_DATA_SGL);
+
+	reinit_completion(&bam_txn->txn_done);
+}
+
+/* Callback for DMA descriptor completion */
+static void qpic_bam_dma_done(void *data)
+{
+	struct bam_transaction *bam_txn = data;
+
+	/*
+	 * In case of data transfer with NAND, 2 callbacks will be generated.
+	 * One for command channel and another one for data channel.
+	 * If current transaction has data descriptors
+	 * (i.e. wait_second_completion is true), then set this to false
+	 * and wait for second DMA descriptor completion.
+	 */
+	if (bam_txn->wait_second_completion)
+		bam_txn->wait_second_completion = false;
+	else
+		complete(&bam_txn->txn_done);
 }
 
 static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
@@ -756,6 +791,12 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
 
 	desc->dma_desc = dma_desc;
 
+	/* update last data/command descriptor */
+	if (chan == nandc->cmd_chan)
+		bam_txn->last_cmd_desc = dma_desc;
+	else
+		bam_txn->last_data_desc = dma_desc;
+
 	list_add_tail(&desc->node, &nandc->desc_list);
 
 	return 0;
@@ -1273,10 +1314,20 @@ static int submit_descs(struct qcom_nand_controller *nandc)
 		cookie = dmaengine_submit(desc->dma_desc);
 
 	if (nandc->props->is_bam) {
+		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+		bam_txn->last_cmd_desc->callback_param = bam_txn;
+		if (bam_txn->last_data_desc) {
+			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+			bam_txn->last_data_desc->callback_param = bam_txn;
+			bam_txn->wait_second_completion = true;
+		}
+
 		dma_async_issue_pending(nandc->tx_chan);
 		dma_async_issue_pending(nandc->rx_chan);
+		dma_async_issue_pending(nandc->cmd_chan);
 
-		if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
+		if (!wait_for_completion_timeout(&bam_txn->txn_done,
+						 QPIC_NAND_COMPLETION_TIMEOUT))
 			return -ETIMEDOUT;
 	} else {
 		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
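The replaced dma_sync_wait() call polled only the command channel, which is exactly what opened the race window. The new scheme relies on the generic dmaengine client pattern: set callback/callback_param on a descriptor before submitting it, issue pending work on the channel, then block on a struct completion with a timeout. A condensed, hypothetical helper illustrating that flow (reusing the demo_* names from the sketch above; error handling and the multi-channel bookkeeping omitted):

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int demo_run_and_wait(struct dma_chan *chan,
			     struct dma_async_tx_descriptor *last_desc,
			     struct demo_txn *txn)
{
	/* arm the callback on the channel's final descriptor */
	last_desc->callback = demo_dma_done;
	last_desc->callback_param = txn;

	dmaengine_submit(last_desc);
	dma_async_issue_pending(chan);

	/* wait_for_completion_timeout() returns 0 on timeout */
	if (!wait_for_completion_timeout(&txn->txn_done,
					 msecs_to_jiffies(2000)))
		return -ETIMEDOUT;

	return 0;
}

In the driver itself the wait happens once per transaction, after issuing pending work on all three channels, so the command and data completions can arrive in either order without exposing a half-finished buffer.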
