
Commit 631cfdd

mtd: spi-nand: Add continuous read support
A regular page read consists of:
- asking for one page of content from the NAND array to be loaded into the chip's SRAM,
- waiting for the operation to be done,
- retrieving the data (I/O phase) from the chip's SRAM.

When reading several sequential pages, the above operation is repeated over and over. There is however a way to optimize these accesses, by enabling continuous reads. The feature requires the NAND chip to have a second internal SRAM area plus a bit of additional internal logic to trigger another internal transfer between the NAND array and the second SRAM area while the I/O phase is ongoing. Once the first I/O phase is done, the host can continue reading more data, continuously, as the chip will automatically switch to the second SRAM content (which has already been loaded) and in turn trigger the next load into the first SRAM area again.

From an instruction perspective, the command op-codes are different, but the same cycles are required. The only difference is that after a continuous read (which is stopped by a CS deassert), the host must observe a delay of tRST. However, because there is no guarantee in Linux regarding the actual state of the CS pin after a transfer (in order to speed up the next transfer if targeting the same device), it was necessary to manually end the continuous read with a configuration register write operation.

Continuous reads have two main drawbacks:
* They only work on full pages (the column address is ignored).
* Only the main data area is pulled; out-of-band bytes are not accessible.

In other words, the feature can only be useful with on-die ECC engines.

Performance-wise, measurements were performed on a Zynq platform using the Macronix SPI-NAND controller with a Macronix chip (based on the flash_speed tool modified for testing sequential reads):
- 1-1-1 mode: performance improved from +3% (2 pages) up to +10% after a dozen pages.
- 1-1-4 mode: performance improved from +15% (2 pages) up to +40% after a dozen pages.

This series is based on previous work from Macronix engineer Jaime Liao.

Signed-off-by: Miquel Raynal <[email protected]>
Reviewed-by: Pratyush Yadav <[email protected]>
Link: https://lore.kernel.org/linux-mtd/[email protected]
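The exact flash_speed modifications used for the figures above are not part of this commit. Purely as an illustration of the kind of sequential, page-aligned read that exercises the new path, a minimal user-space sketch over an MTD character device could look like the following (the /dev/mtd0 node and the 2KiB page geometry are assumptions, not taken from this change):

/* Hypothetical sequential-read micro-benchmark, not the actual flash_speed tool. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        const size_t chunk = 12 * 2048; /* a dozen 2KiB pages, assumed geometry */
        unsigned char *buf = malloc(chunk);
        struct timespec t0, t1;
        ssize_t ret;
        int fd;

        fd = open("/dev/mtd0", O_RDONLY); /* hypothetical MTD partition */
        if (fd < 0 || !buf)
                return 1;

        clock_gettime(CLOCK_MONOTONIC, &t0);
        ret = pread(fd, buf, chunk, 0);   /* one large, page-aligned read */
        clock_gettime(CLOCK_MONOTONIC, &t1);

        if (ret != (ssize_t)chunk)
                return 1;

        printf("read %zd bytes in %ld ns\n", ret,
               (t1.tv_sec - t0.tv_sec) * 1000000000L + (t1.tv_nsec - t0.tv_nsec));
        close(fd);
        free(buf);
        return 0;
}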
1 parent 79da170 commit 631cfdd

2 files changed: +184 −8 lines changed

drivers/mtd/nand/spi/core.c

Lines changed: 168 additions & 8 deletions
@@ -200,6 +200,12 @@ static int spinand_ecc_enable(struct spinand_device *spinand,
 			      enable ? CFG_ECC_ENABLE : 0);
 }
 
+static int spinand_cont_read_enable(struct spinand_device *spinand,
+				    bool enable)
+{
+	return spinand->set_cont_read(spinand, enable);
+}
+
 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
 {
 	struct nand_device *nand = spinand_to_nand(spinand);
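The helper added above simply dispatches to a manufacturer-provided callback; the actual vendor implementation is not part of this diff. As a rough sketch of what such a callback might look like (the example_ name and the command opcodes are hypothetical placeholders, and real chips may instead toggle a configuration register bit), a driver could issue a vendor-specific command through the spi-mem API:

/*
 * Hypothetical manufacturer hook toggling continuous reads with a
 * vendor-specific command; the 0x20/0x21 opcodes are placeholders only.
 */
static int example_set_cont_read(struct spinand_device *spinand, bool enable)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? 0x20 : 0x21, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_NO_DATA);

	return spi_mem_exec_op(spinand->spimem, &op);
}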
@@ -311,10 +317,22 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
 
 	/* Finish a page read: check the status, report errors/bitflips */
 	ret = spinand_check_ecc_status(spinand, engine_conf->status);
-	if (ret == -EBADMSG)
+	if (ret == -EBADMSG) {
 		mtd->ecc_stats.failed++;
-	else if (ret > 0)
-		mtd->ecc_stats.corrected += ret;
+	} else if (ret > 0) {
+		unsigned int pages;
+
+		/*
+		 * Continuous reads don't allow us to get the per-page detail,
+		 * so we may exaggerate the actual number of corrected bitflips.
+		 */
+		if (!req->continuous)
+			pages = 1;
+		else
+			pages = req->datalen / nanddev_page_size(nand);
+
+		mtd->ecc_stats.corrected += ret * pages;
+	}
 
 	return ret;
 }
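As a worked example of the overcounting mentioned in the comment (illustrative numbers only): if a continuous read covers eight pages and the status register reports up to 3 corrected bitflips for the whole operation, the code above credits 3 * 8 = 24 corrected bitflips to the statistics, even if only a single page actually needed correction, because the per-page detail is simply not available in this mode.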
@@ -369,7 +387,11 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
 
 	if (req->datalen) {
 		buf = spinand->databuf;
-		nbytes = nanddev_page_size(nand);
+		if (!req->continuous)
+			nbytes = nanddev_page_size(nand);
+		else
+			nbytes = round_up(req->dataoffs + req->datalen,
+					  nanddev_page_size(nand));
 		column = 0;
 	}
 
@@ -397,6 +419,13 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
 		nbytes -= ret;
 		column += ret;
 		buf += ret;
+
+		/*
+		 * Dirmap accesses are allowed to toggle the CS.
+		 * Toggling the CS during a continuous read is forbidden.
+		 */
+		if (nbytes && req->continuous)
+			return -EIO;
 	}
 
 	if (req->datalen)
@@ -672,6 +701,125 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
 	return ret;
 }
 
+static int spinand_mtd_continuous_page_read(struct mtd_info *mtd, loff_t from,
+					    struct mtd_oob_ops *ops,
+					    unsigned int *max_bitflips)
+{
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	struct nand_io_iter iter;
+	u8 status;
+	int ret;
+
+	ret = spinand_cont_read_enable(spinand, true);
+	if (ret)
+		return ret;
+
+	/*
+	 * The cache is divided into two halves. While one half of the cache has
+	 * the requested data, the other half is loaded with the next chunk of data.
+	 * Therefore, the host can read out the data continuously from page to page.
+	 * Each data read must be a multiple of 4 bytes and full pages should be read;
+	 * otherwise, the data output might get out of sequence from one read command
+	 * to another.
+	 */
+	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
+		ret = spinand_select_target(spinand, iter.req.pos.target);
+		if (ret)
+			goto end_cont_read;
+
+		ret = nand_ecc_prepare_io_req(nand, &iter.req);
+		if (ret)
+			goto end_cont_read;
+
+		ret = spinand_load_page_op(spinand, &iter.req);
+		if (ret)
+			goto end_cont_read;
+
+		ret = spinand_wait(spinand, SPINAND_READ_INITIAL_DELAY_US,
+				   SPINAND_READ_POLL_DELAY_US, NULL);
+		if (ret < 0)
+			goto end_cont_read;
+
+		ret = spinand_read_from_cache_op(spinand, &iter.req);
+		if (ret)
+			goto end_cont_read;
+
+		ops->retlen += iter.req.datalen;
+
+		ret = spinand_read_status(spinand, &status);
+		if (ret)
+			goto end_cont_read;
+
+		spinand_ondie_ecc_save_status(nand, status);
+
+		ret = nand_ecc_finish_io_req(nand, &iter.req);
+		if (ret < 0)
+			goto end_cont_read;
+
+		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+		ret = 0;
+	}
+
+end_cont_read:
+	/*
+	 * Once all the data has been read out, the host can either pull CS#
+	 * high and wait for tRST or manually clear the bit in the configuration
+	 * register to terminate the continuous read operation. We have no
+	 * guarantee the SPI controller drivers will effectively deassert the CS
+	 * when we expect them to, so take the register-based approach.
+	 */
+	spinand_cont_read_enable(spinand, false);
+
+	return ret;
+}
+
+static void spinand_cont_read_init(struct spinand_device *spinand)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	enum nand_ecc_engine_type engine_type = nand->ecc.ctx.conf.engine_type;
+
+	/* OOBs cannot be retrieved so external/on-host ECC engine won't work */
+	if (spinand->set_cont_read &&
+	    (engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE ||
+	     engine_type == NAND_ECC_ENGINE_TYPE_NONE)) {
+		spinand->cont_read_possible = true;
+	}
+}
+
+static bool spinand_use_cont_read(struct mtd_info *mtd, loff_t from,
+				  struct mtd_oob_ops *ops)
+{
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	struct spinand_device *spinand = nand_to_spinand(nand);
+	struct nand_pos start_pos, end_pos;
+
+	if (!spinand->cont_read_possible)
+		return false;
+
+	/* OOBs won't be retrieved */
+	if (ops->ooblen || ops->oobbuf)
+		return false;
+
+	nanddev_offs_to_pos(nand, from, &start_pos);
+	nanddev_offs_to_pos(nand, from + ops->len - 1, &end_pos);
+
+	/*
+	 * Continuous reads never cross LUN boundaries. Some devices don't
+	 * support crossing plane boundaries. Some devices don't even support
+	 * crossing block boundaries. The common case being to read through UBI,
+	 * we will very rarely read two consecutive blocks or more, so it is safer
+	 * and easier (can be improved) to only enable continuous reads when
+	 * reading within the same erase block.
+	 */
+	if (start_pos.target != end_pos.target ||
+	    start_pos.plane != end_pos.plane ||
+	    start_pos.eraseblock != end_pos.eraseblock)
+		return false;
+
+	return start_pos.page < end_pos.page;
+}
+
 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
 			    struct mtd_oob_ops *ops)
 {
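To make the gating in spinand_use_cont_read() concrete, here is a minimal caller-side sketch of a read that qualifies and one that does not. The example_ function, the mtd pointer's origin, and the 2KiB-page / 128KiB-block geometry are assumptions for illustration; mtd_read() is the generic MTD accessor that ends up in spinand_mtd_read().

#include <linux/mtd/mtd.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/*
 * Illustrative only: a multi-page read that stays within one erase block and
 * requests no OOB data, so spinand_use_cont_read() can return true. The
 * 2KiB-page / 128KiB-block geometry is an assumption.
 */
static int example_sequential_read(struct mtd_info *mtd)
{
	size_t retlen;
	u8 *buf = kmalloc(8 * SZ_2K, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	/* 8 consecutive pages from the start of an erase block: continuous read */
	ret = mtd_read(mtd, 0, 8 * SZ_2K, &retlen, buf);

	/* A read crossing an erase block boundary uses the regular page-read path */
	if (!ret)
		ret = mtd_read(mtd, SZ_128K - SZ_2K, 4 * SZ_2K, &retlen, buf);

	kfree(buf);
	return ret;
}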
@@ -684,7 +832,10 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
 
 	old_stats = mtd->ecc_stats;
 
-	ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+	if (spinand_use_cont_read(mtd, from, ops))
+		ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
+	else
+		ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
 
 	if (ops->stats) {
 		ops->stats->uncorrectable_errors +=
@@ -874,6 +1025,9 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
 	};
 	struct spi_mem_dirmap_desc *desc;
 
+	if (spinand->cont_read_possible)
+		info.length = nanddev_eraseblock_size(nand);
+
 	/* The plane number is passed in MSB just above the column address */
 	info.offset = plane << fls(nand->memorg.pagesize);
 
@@ -1107,6 +1261,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
 	spinand->flags = table[i].flags;
 	spinand->id.len = 1 + table[i].devid.len;
 	spinand->select_target = table[i].select_target;
+	spinand->set_cont_read = table[i].set_cont_read;
 
 	op = spinand_select_op_variant(spinand,
 				       info->op_variants.read_cache);
@@ -1248,9 +1403,8 @@ static int spinand_init(struct spinand_device *spinand)
 	 * may use this buffer for DMA access.
 	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
 	 */
-	spinand->databuf = kzalloc(nanddev_page_size(nand) +
-				   nanddev_per_page_oobsize(nand),
-				   GFP_KERNEL);
+	spinand->databuf = kzalloc(nanddev_eraseblock_size(nand),
+				   GFP_KERNEL);
 	if (!spinand->databuf) {
 		ret = -ENOMEM;
 		goto err_free_bufs;
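Note the memory trade-off implied by this hunk: since continuous reads may now fill up to a full erase block in one go, the bounce buffer is sized to an erase block rather than a single page plus OOB. With an illustrative 2KiB-page, 64-pages-per-block device, that means growing from roughly 2KiB + 64 bytes to 64 * 2KiB = 128KiB per device.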
@@ -1279,6 +1433,12 @@ static int spinand_init(struct spinand_device *spinand)
 	if (ret)
 		goto err_cleanup_nanddev;
 
+	/*
+	 * Continuous read can only be enabled with an on-die ECC engine, so the
+	 * ECC initialization must have happened previously.
+	 */
+	spinand_cont_read_init(spinand);
+
 	mtd->_read_oob = spinand_mtd_read;
 	mtd->_write_oob = spinand_mtd_write;
 	mtd->_block_isbad = spinand_mtd_block_isbad;

include/linux/mtd/spinand.h

Lines changed: 16 additions & 0 deletions
@@ -336,6 +336,7 @@ struct spinand_ondie_ecc_conf {
  * @op_variants.update_cache: variants of the update-cache operation
  * @select_target: function used to select a target/die. Required only for
  *		   multi-die chips
+ * @set_cont_read: enable/disable continuous cached reads
  *
  * Each SPI NAND manufacturer driver should have a spinand_info table
  * describing all the chips supported by the driver.
@@ -354,6 +355,8 @@ struct spinand_info {
 	} op_variants;
 	int (*select_target)(struct spinand_device *spinand,
 			     unsigned int target);
+	int (*set_cont_read)(struct spinand_device *spinand,
+			     bool enable);
 };
 
 #define SPINAND_ID(__method, ...)					\
@@ -379,6 +382,9 @@ struct spinand_info {
 #define SPINAND_SELECT_TARGET(__func)					\
 	.select_target = __func,
 
+#define SPINAND_CONT_READ(__set_cont_read)				\
+	.set_cont_read = __set_cont_read,
+
 #define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants,	\
 		     __flags, ...)					\
 	{								\
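With the new macro in place, a manufacturer table entry can advertise its callback alongside the usual descriptors. The abbreviated fragment below is only a sketch and does not compile as-is: the chip name and example_ identifiers are placeholders, and the elided arguments stand in for the usual SPINAND_ID(), NAND_MEMORG(), NAND_ECCREQ(), op-variant and flags parameters of a real entry.

/* Hypothetical, abbreviated spinand_info entry; "..." elides the usual arguments. */
SPINAND_INFO("EXAMPLE-CHIP",
	     /* ... SPINAND_ID(), NAND_MEMORG(), NAND_ECCREQ(), op variants, flags ... */
	     SPINAND_ECCINFO(&example_ooblayout, example_ecc_get_status),
	     SPINAND_CONT_READ(example_set_cont_read)),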
@@ -422,6 +428,12 @@ struct spinand_dirmap {
  *		passed in spi_mem_op be DMA-able, so we can't based the bufs on
  *		the stack
  * @manufacturer: SPI NAND manufacturer information
+ * @cont_read_possible: Field filled by the core once the whole system
+ *		configuration is known to tell whether continuous reads are
+ *		suitable to use or not in general with this chip/configuration.
+ *		A per-transfer check must of course be done to ensure it is
+ *		actually relevant to enable this feature.
+ * @set_cont_read: Enable/disable the continuous read feature
  * @priv: manufacturer private data
  */
 struct spinand_device {
@@ -451,6 +463,10 @@ struct spinand_device {
 	u8 *scratchbuf;
 	const struct spinand_manufacturer *manufacturer;
 	void *priv;
+
+	bool cont_read_possible;
+	int (*set_cont_read)(struct spinand_device *spinand,
+			     bool enable);
 };
 
 /**
