@@ -103,6 +103,8 @@ enum nand_page_io_req_type {
103
103
* @ooblen: the number of OOB bytes to read from/write to this page
104
104
* @oobbuf: buffer to store OOB data in or get OOB data from
105
105
* @mode: one of the %MTD_OPS_XXX mode
106
+ * @continuous: no need to start over the operation at the end of each page, the
107
+ * NAND device will automatically prepare the next one
106
108
*
107
109
* This object is used to pass per-page I/O requests to NAND sub-layers. This
108
110
* way all useful information are already formatted in a useful way and
@@ -125,6 +127,7 @@ struct nand_page_io_req {
125
127
void * in ;
126
128
} oobbuf ;
127
129
int mode ;
130
+ bool continuous ;
128
131
};
129
132
130
133
const struct mtd_ooblayout_ops * nand_get_small_page_ooblayout (void );
@@ -937,6 +940,43 @@ static inline void nanddev_io_page_iter_init(struct nand_device *nand,
937
940
iter -> req .ooblen = min_t (unsigned int ,
938
941
iter -> oobbytes_per_page - iter -> req .ooboffs ,
939
942
iter -> oobleft );
943
+ iter -> req .continuous = false;
944
+ }
945
+
946
+ /**
947
+ * nand_io_block_iter_init - Initialize a NAND I/O iterator
948
+ * @nand: NAND device
949
+ * @offs: absolute offset
950
+ * @req: MTD request
951
+ * @iter: NAND I/O iterator
952
+ *
953
+ * Initializes a NAND iterator based on the information passed by the MTD
954
+ * layer for block jumps (no OOB)
955
+ *
956
+ * In practice only reads may leverage this iterator.
957
+ */
958
+ static inline void nanddev_io_block_iter_init (struct nand_device * nand ,
959
+ enum nand_page_io_req_type reqtype ,
960
+ loff_t offs , struct mtd_oob_ops * req ,
961
+ struct nand_io_iter * iter )
962
+ {
963
+ unsigned int offs_in_eb ;
964
+
965
+ iter -> req .type = reqtype ;
966
+ iter -> req .mode = req -> mode ;
967
+ iter -> req .dataoffs = nanddev_offs_to_pos (nand , offs , & iter -> req .pos );
968
+ iter -> req .ooboffs = 0 ;
969
+ iter -> oobbytes_per_page = 0 ;
970
+ iter -> dataleft = req -> len ;
971
+ iter -> oobleft = 0 ;
972
+ iter -> req .databuf .in = req -> datbuf ;
973
+ offs_in_eb = (nand -> memorg .pagesize * iter -> req .pos .page ) + iter -> req .dataoffs ;
974
+ iter -> req .datalen = min_t (unsigned int ,
975
+ nanddev_eraseblock_size (nand ) - offs_in_eb ,
976
+ iter -> dataleft );
977
+ iter -> req .oobbuf .in = NULL ;
978
+ iter -> req .ooblen = 0 ;
979
+ iter -> req .continuous = true;
940
980
}
941
981
942
982
/**
@@ -962,6 +1002,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand,
962
1002
iter -> oobleft );
963
1003
}
964
1004
1005
+ /**
1006
+ * nand_io_iter_next_block - Move to the next block
1007
+ * @nand: NAND device
1008
+ * @iter: NAND I/O iterator
1009
+ *
1010
+ * Updates the @iter to point to the next block.
1011
+ * No OOB handling available.
1012
+ */
1013
+ static inline void nanddev_io_iter_next_block (struct nand_device * nand ,
1014
+ struct nand_io_iter * iter )
1015
+ {
1016
+ nanddev_pos_next_eraseblock (nand , & iter -> req .pos );
1017
+ iter -> dataleft -= iter -> req .datalen ;
1018
+ iter -> req .databuf .in += iter -> req .datalen ;
1019
+ iter -> req .dataoffs = 0 ;
1020
+ iter -> req .datalen = min_t (unsigned int , nanddev_eraseblock_size (nand ),
1021
+ iter -> dataleft );
1022
+ }
1023
+
965
1024
/**
966
1025
* nand_io_iter_end - Should end iteration or not
967
1026
* @nand: NAND device
@@ -997,6 +1056,21 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
997
1056
!nanddev_io_iter_end(nand, iter); \
998
1057
nanddev_io_iter_next_page(nand, iter))
999
1058
1059
/**
 * nanddev_io_for_each_block - Iterate over all NAND pages contained in an MTD
 *			       I/O request, one block at a time
 * @nand: NAND device
 * @type: I/O request type (read or write)
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used for iterating over blocks that are contained in an MTD
 * request.
 */
#define nanddev_io_for_each_block(nand, type, start, req, iter)		\
	for (nanddev_io_block_iter_init(nand, type, start, req, iter);	\
	     !nanddev_io_iter_end(nand, iter);				\
	     nanddev_io_iter_next_block(nand, iter))
1073
+
1000
1074
bool nanddev_isbad (struct nand_device * nand , const struct nand_pos * pos );
1001
1075
bool nanddev_isreserved (struct nand_device * nand , const struct nand_pos * pos );
1002
1076
int nanddev_markbad (struct nand_device * nand , const struct nand_pos * pos );
0 commit comments