Skip to content

Commit 13c15e0

Browse files
committed
mtd: spinand: Handle the case where PROGRAM LOAD does not reset the cache
Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset the cache content to 0xFF (depends on vendor implementation), so we must fill the page cache entirely even if we only want to program the data portion of the page, otherwise we might corrupt the BBM or user data previously programmed in OOB area.

Fixes: 7529df4 ("mtd: nand: Add core infrastructure to support SPI NANDs")
Reported-by: Stefan Roese <sr@denx.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Boris Brezillon <bbrezillon@kernel.org>
Tested-by: Stefan Roese <sr@denx.de>
Reviewed-by: Stefan Roese <sr@denx.de>
Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
1 parent 49a5785 commit 13c15e0

File tree

1 file changed

+20
-22
lines changed

1 file changed

+20
-22
lines changed

drivers/mtd/nand/spi/core.c

Lines changed: 20 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
304304
struct nand_device *nand = spinand_to_nand(spinand);
305305
struct mtd_info *mtd = nanddev_to_mtd(nand);
306306
struct nand_page_io_req adjreq = *req;
307-
unsigned int nbytes = 0;
308-
void *buf = NULL;
307+
void *buf = spinand->databuf;
308+
unsigned int nbytes;
309309
u16 column = 0;
310310
int ret;
311311

312-
memset(spinand->databuf, 0xff,
313-
nanddev_page_size(nand) +
314-
nanddev_per_page_oobsize(nand));
312+
/*
313+
* Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
314+
* the cache content to 0xFF (depends on vendor implementation), so we
315+
* must fill the page cache entirely even if we only want to program
316+
* the data portion of the page, otherwise we might corrupt the BBM or
317+
* user data previously programmed in OOB area.
318+
*/
319+
nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
320+
memset(spinand->databuf, 0xff, nbytes);
321+
adjreq.dataoffs = 0;
322+
adjreq.datalen = nanddev_page_size(nand);
323+
adjreq.databuf.out = spinand->databuf;
324+
adjreq.ooblen = nanddev_per_page_oobsize(nand);
325+
adjreq.ooboffs = 0;
326+
adjreq.oobbuf.out = spinand->oobbuf;
315327

316-
if (req->datalen) {
328+
if (req->datalen)
317329
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
318330
req->datalen);
319-
adjreq.dataoffs = 0;
320-
adjreq.datalen = nanddev_page_size(nand);
321-
adjreq.databuf.out = spinand->databuf;
322-
nbytes = adjreq.datalen;
323-
buf = spinand->databuf;
324-
}
325331

326332
if (req->ooblen) {
327333
if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
332338
else
333339
memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
334340
req->ooblen);
335-
336-
adjreq.ooblen = nanddev_per_page_oobsize(nand);
337-
adjreq.ooboffs = 0;
338-
nbytes += nanddev_per_page_oobsize(nand);
339-
if (!buf) {
340-
buf = spinand->oobbuf;
341-
column = nanddev_page_size(nand);
342-
}
343341
}
344342

345343
spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
370368

371369
/*
372370
* We need to use the RANDOM LOAD CACHE operation if there's
373-
* more than one iteration, because the LOAD operation resets
374-
* the cache to 0xff.
371+
* more than one iteration, because the LOAD operation might
372+
* reset the cache to 0xff.
375373
*/
376374
if (nbytes) {
377375
column = op.addr.val;

0 commit comments

Comments
 (0)