Commit 14772d11c4674adab8eea0132988a6f5e1f425dd

Authored by Alice Guo
Committed by Ye Li
1 parent 4886ec745e

MLK-22580-1: nand: mxs_nand: let imx8mm-evk use the hardware BCH and
randomizer

imx8mm-evk needs BCH encoding and needs to set the NAND page number to
be randomized, so modify the conditional compilation accordingly.

Use CONFIG_IMX8M so that the change applies to imx8mq/mm/mn.

Signed-off-by: Alice Guo <alice.guo@nxp.com>
(cherry picked from commit da40cd99e4b3a78d2609ee777d60d651d6dbc313)
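
The randomizer seed math that this change enables on i.MX8M is small enough to show in isolation. Below is a minimal, standalone C sketch of how the write path packs the seed; pack_randomizer_seed() and the sample values are illustrative, not driver API, but the OR into the fourth GPMI PIO word mirrors the code in the diff:

#include <stdio.h>
#include <stdint.h>

/*
 * Sketch of the seed packing done in mxs_nand_ecc_write_page(): the page
 * number is folded into 0..254 and placed in bits 16..23 of the PIO word
 * that also carries (writesize + oobsize).
 */
static uint32_t pack_randomizer_seed(uint32_t pio_word3, int page)
{
	return pio_word3 | ((page % 255) << 16);
}

int main(void)
{
	/* e.g. page 300 of a 4096+224 byte page: 300 % 255 = 45 */
	uint32_t word = pack_randomizer_seed(4096 + 224, 300);

	printf("PIO word 3 = 0x%08x (seed %u)\n",
	       (unsigned)word, (unsigned)((word >> 16) & 0xff));
	return 0;
}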

Showing 1 changed file with 2 additions and 2 deletions

drivers/mtd/nand/raw/mxs_nand.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 * Copyright 2017-2019 NXP
 */

#include <common.h>
#include <dm.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <asm/arch/sys_proto.h>
#include <mxs_nand.h>

#define MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if (defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || defined(CONFIG_IMX8M))
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
#define MXS_NAND_METADATA_SIZE			10
#define MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define MXS_NAND_BCH_TIMEOUT			10000

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#ifndef CONFIG_SYS_DCACHE_OFF
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

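/*
 * For example: with MXS_NAND_METADATA_SIZE = 10, the helper below returns
 * (10 + 3) & ~3 = 12, i.e. the per-chunk BCH status bytes start at the
 * first 4-byte-aligned offset after the metadata in the auxiliary buffer.
 */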
static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}

static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo, struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(this->dev, "The size of chunk0 must equal to chunkn\n");
		return false;
	}

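	/*
	 * i counts the complete (data chunk + ECC parity) spans that fit
	 * before the physical bad block marker; j is the marker's bit
	 * offset within the next span. If j lands in the data part of that
	 * span, the marker sits in a data chunk and can be swapped.
	 */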
	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
	    (geo->gf_len * geo->ecc_strength +
	     geo->ecc_chunkn_size * 8);

	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
	    (geo->gf_len * geo->ecc_strength +
	     geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}

static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}
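	/*
	 * A BCH code over GF(2^m) has codewords of at most 2^m - 1 bits, so
	 * a 512-byte chunk (4096 data bits plus parity) fits in GF(2^13),
	 * while a 1 KiB chunk needs GF(2^14).
	 */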
	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the chunk size (C) >= the OOB size (O). */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
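	/*
	 * Worked example: a 4 KiB page with 224 OOB bytes and 512-byte
	 * chunks has (224 - 10) * 8 = 1712 spare bits for 8 chunks, giving
	 * 1712 / (13 * 8) = 16 bits of correction per chunk (already even,
	 * so unchanged by the rounding below).
	 */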
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* sanity check for the minimum ecc nand required */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* calculate the maximum ecc the platform can support */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);

	/*
	 * search for a supported ecc strength that puts the bbm
	 * in a data chunk
	 */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/*
	 * if none of them works, keep using the minimum ecc the
	 * nand requires but change the ecc page layout
	 */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* add extra ecc for meta data */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* check if oob can afford this extra ecc chunk */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* calculate in which chunk the bbm is located */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			    (geo->gf_len * geo->ecc_strength +
			     geo->ecc_chunkn_size * 8) + 1;
	}

	/* calculate the number of ecc chunks behind the bbm */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

/*
 * Wait for BCH complete IRQ and clear the IRQ
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
				BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
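	/*
	 * For example, with bit_offset = 2 the mark is bits 2..9 of the
	 * stream at buf_offset: the shifts below reassemble it from the
	 * upper six bits of data_buf[buf_offset] and the low two bits of
	 * data_buf[buf_offset + 1].
	 */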
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;

	mxs_nand_read_buf(mtd, &buf, 1);

	return buf;
}

static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 uint8_t *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	uint32_t *dma_buf = (uint32_t *)buf;
	int i;

	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;
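	/*
	 * For example, gf_len = 14 gives a threshold of 7 bitflips per
	 * chunk, capped at the configured ECC strength; a chunk at or
	 * below the threshold can still be treated as erased (all 0xff).
	 */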
	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n", page, chunk,
	       threshold, flip_bits, flip_bits_noecc);

	return true;
}

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;
	int flag = 0;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			if (is_mx6dqp() || is_mx7() ||
			    is_mx6ul() || is_imx8() || is_imx8m())
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}

/*
 * Write a page to NAND.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				   struct nand_chip *nand, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

-	if (is_mx7() && nand_info->en_randomizer) {
+	if ((is_mx7() && nand_info->en_randomizer) || (is_imx8m() && nand_info->en_randomizer)) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/*
		 * Write the NAND page number to be randomized to the
		 * GPMI_ECCCOUNT register.
		 *
		 * The value is between 0-255. For additional details
		 * check section 9.6.6.4 of the i.MX7D Applications
		 * Processor Reference Manual.
		 */
		d->cmd.pio_words[3] |= (page % 255) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return 0;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
990 * in the page, without the distortions applied by our ECC engine. 990 * in the page, without the distortions applied by our ECC engine.
991 * 991 *
992 * What we do for this specific read operation depends on whether we're doing 992 * What we do for this specific read operation depends on whether we're doing
993 * "raw" read, or an ECC-based read. 993 * "raw" read, or an ECC-based read.
994 * 994 *
995 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not 995 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
996 * easy. When reading a page, for example, the NAND Flash MTD code calls our 996 * easy. When reading a page, for example, the NAND Flash MTD code calls our
997 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an 997 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
998 * ECC-based or raw view of the page is implicit in which function it calls 998 * ECC-based or raw view of the page is implicit in which function it calls
999 * (there is a similar pair of ECC-based/raw functions for writing). 999 * (there is a similar pair of ECC-based/raw functions for writing).
1000 * 1000 *
1001 * Since MTD assumes the OOB is not covered by ECC, there is no pair of 1001 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
1002 * ECC-based/raw functions for reading or writing the OOB. The fact that the 1002 * ECC-based/raw functions for reading or writing the OOB. The fact that the
1003 * caller wants an ECC-based or raw view of the page is not propagated down to 1003 * caller wants an ECC-based or raw view of the page is not propagated down to
1004 * this driver. 1004 * this driver.
1005 * 1005 *
1006 * Since our OOB *is* covered by ECC, we need this information. So, we hook the 1006 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
1007 * ecc.read_oob and ecc.write_oob function pointers in the owning 1007 * ecc.read_oob and ecc.write_oob function pointers in the owning
1008 * struct mtd_info with our own functions. These hook functions set the 1008 * struct mtd_info with our own functions. These hook functions set the
1009 * raw_oob_mode field so that, when control finally arrives here, we'll know 1009 * raw_oob_mode field so that, when control finally arrives here, we'll know
1010 * what to do. 1010 * what to do.
1011 */ 1011 */
1012 static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand, 1012 static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
1013 int page) 1013 int page)
1014 { 1014 {
1015 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1015 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1016 1016
1017 /* 1017 /*
1018 * First, fill in the OOB buffer. If we're doing a raw read, we need to 1018 * First, fill in the OOB buffer. If we're doing a raw read, we need to
1019 * get the bytes from the physical page. If we're not doing a raw read, 1019 * get the bytes from the physical page. If we're not doing a raw read,
1020 * we need to fill the buffer with set bits. 1020 * we need to fill the buffer with set bits.
1021 */ 1021 */
1022 if (nand_info->raw_oob_mode) { 1022 if (nand_info->raw_oob_mode) {
1023 /* 1023 /*
1024 * If control arrives here, we're doing a "raw" read. Send the 1024 * If control arrives here, we're doing a "raw" read. Send the
1025 * command to read the conventional OOB and read it. 1025 * command to read the conventional OOB and read it.
1026 */ 1026 */
1027 nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); 1027 nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
1028 nand->read_buf(mtd, nand->oob_poi, mtd->oobsize); 1028 nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
1029 } else { 1029 } else {
1030 /* 1030 /*
1031 * If control arrives here, we're not doing a "raw" read. Fill 1031 * If control arrives here, we're not doing a "raw" read. Fill
1032 * the OOB buffer with set bits and correct the block mark. 1032 * the OOB buffer with set bits and correct the block mark.
1033 */ 1033 */
1034 memset(nand->oob_poi, 0xff, mtd->oobsize); 1034 memset(nand->oob_poi, 0xff, mtd->oobsize);
1035 1035
1036 nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); 1036 nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
1037 mxs_nand_read_buf(mtd, nand->oob_poi, 1); 1037 mxs_nand_read_buf(mtd, nand->oob_poi, 1);
1038 } 1038 }
1039 1039
1040 return 0; 1040 return 0;
1041 1041
1042 } 1042 }
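
For context, a caller-side sketch (not part of this driver) of how ops->mode travels through the hook installed by mxs_nand_setup_ecc() and selects the raw branch above:

    /* Sketch: request a raw OOB read. mxs_nand_hook_read_oob() records
     * MTD_OPS_RAW in nand_info->raw_oob_mode before the MTD core calls
     * back into mxs_nand_ecc_read_oob(). */
    static int example_raw_oob_read(struct mtd_info *mtd, loff_t from, u8 *oob)
    {
            struct mtd_oob_ops ops = {
                    .mode   = MTD_OPS_RAW,
                    .ooblen = mtd->oobsize,
                    .oobbuf = oob,
            };

            return mtd_read_oob(mtd, from, &ops);
    }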
1043 1043
1044 /* 1044 /*
1045 * Write OOB data to NAND. 1045 * Write OOB data to NAND.
1046 */ 1046 */
1047 static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand, 1047 static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
1048 int page) 1048 int page)
1049 { 1049 {
1050 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1050 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1051 uint8_t block_mark = 0; 1051 uint8_t block_mark = 0;
1052 1052
1053 /* 1053 /*
1054 * There are fundamental incompatibilities between the i.MX GPMI NFC and 1054 * There are fundamental incompatibilities between the i.MX GPMI NFC and
1055 * the NAND Flash MTD model that make it essentially impossible to write 1055 * the NAND Flash MTD model that make it essentially impossible to write
1056 * the out-of-band bytes. 1056 * the out-of-band bytes.
1057 * 1057 *
1058 * We permit *ONE* exception. If the *intent* of writing the OOB is to 1058 * We permit *ONE* exception. If the *intent* of writing the OOB is to
1059 * mark a block bad, we can do that. 1059 * mark a block bad, we can do that.
1060 */ 1060 */
1061 1061
1062 if (!nand_info->marking_block_bad) { 1062 if (!nand_info->marking_block_bad) {
1063 printf("NXS NAND: Writing OOB isn't supported\n"); 1063 printf("NXS NAND: Writing OOB isn't supported\n");
1064 return -EIO; 1064 return -EIO;
1065 } 1065 }
1066 1066
1067 /* Write the block mark. */ 1067 /* Write the block mark. */
1068 nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page); 1068 nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
1069 nand->write_buf(mtd, &block_mark, 1); 1069 nand->write_buf(mtd, &block_mark, 1);
1070 nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 1070 nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1071 1071
1072 /* Check if it worked. */ 1072 /* Check if it worked. */
1073 if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL) 1073 if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
1074 return -EIO; 1074 return -EIO;
1075 1075
1076 return 0; 1076 return 0;
1077 } 1077 }
1078 1078
1079 /* 1079 /*
1080 * Claims all blocks are good. 1080 * Claims all blocks are good.
1081 * 1081 *
1082 * In principle, this function is *only* called when the NAND Flash MTD system 1082 * In principle, this function is *only* called when the NAND Flash MTD system
1083 * isn't allowed to keep an in-memory bad block table, so it is forced to ask 1083 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
1084 * the driver for bad block information. 1084 * the driver for bad block information.
1085 * 1085 *
1086 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so 1086 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
1087 * this function is *only* called when we take it away. 1087 * this function is *only* called when we take it away.
1088 * 1088 *
1089 * Thus, this function is only called when we want *all* blocks to look good, 1089 * Thus, this function is only called when we want *all* blocks to look good,
1090 * so it *always* returns success. 1090 * so it *always* returns success.
1091 */ 1091 */
1092 static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs) 1092 static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
1093 { 1093 {
1094 return 0; 1094 return 0;
1095 } 1095 }
1096 1096
1097 static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo) 1097 static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
1098 { 1098 {
1099 struct nand_chip *chip = mtd_to_nand(mtd); 1099 struct nand_chip *chip = mtd_to_nand(mtd);
1100 struct mxs_nand_info *nand_info = 1100 struct mxs_nand_info *nand_info =
1101 nand_get_controller_data(chip); 1101 nand_get_controller_data(chip);
1102 1102
1103 if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) { 1103 if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
1104 printf("unsupported NAND chip, minimum ecc required %d\n" 1104 printf("unsupported NAND chip, minimum ecc required %d\n"
1105 , chip->ecc_strength_ds); 1105 , chip->ecc_strength_ds);
1106 return -EINVAL; 1106 return -EINVAL;
1107 } 1107 }
1108 1108
1109 if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) && 1109 if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) &&
1110 (mtd->oobsize < 1024)) || nand_info->legacy_bch_geometry) { 1110 (mtd->oobsize < 1024)) || nand_info->legacy_bch_geometry) {
1111 dev_warn(nand_info->dev, "using legacy BCH geometry\n"); 1111 dev_warn(nand_info->dev, "using legacy BCH geometry\n");
1112 return mxs_nand_legacy_calc_ecc_layout(geo, mtd); 1112 return mxs_nand_legacy_calc_ecc_layout(geo, mtd);
1113 } 1113 }
1114 1114
1115 if (mtd->oobsize > 1024 || chip->ecc_step_ds < mtd->oobsize) 1115 if (mtd->oobsize > 1024 || chip->ecc_step_ds < mtd->oobsize)
1116 return mxs_nand_calc_ecc_for_large_oob(geo, mtd); 1116 return mxs_nand_calc_ecc_for_large_oob(geo, mtd);
1117 1117
1118 return mxs_nand_calc_ecc_layout_by_info(geo, mtd, 1118 return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
1119 chip->ecc_strength_ds, chip->ecc_step_ds); 1119 chip->ecc_strength_ds, chip->ecc_step_ds);
1122 } 1122 }
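
To make the selection order concrete, an illustrative trace with made-up datasheet values:

    /* Example: a chip reporting ecc_strength_ds = 8 and ecc_step_ds = 512
     * on a device with mtd->oobsize = 128 and legacy_bch_geometry unset
     * skips the legacy and large-OOB branches and ends up in
     * mxs_nand_calc_ecc_layout_by_info(geo, mtd, 8, 512). */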
1123 1123
1124 /* 1124 /*
1125 * At this point, the physical NAND Flash chips have been identified and 1125 * At this point, the physical NAND Flash chips have been identified and
1126 * counted, so we know the physical geometry. This enables us to make some 1126 * counted, so we know the physical geometry. This enables us to make some
1127 * important configuration decisions. 1127 * important configuration decisions.
1128 * 1128 *
1129 * The return value of this function propagates directly back to this driver's 1129 * The return value of this function propagates directly back to this driver's
1130 * board_nand_init(). Anything other than zero will cause this driver to 1130 * board_nand_init(). Anything other than zero will cause this driver to
1131 * tear everything down and declare failure. 1131 * tear everything down and declare failure.
1132 */ 1132 */
1133 int mxs_nand_setup_ecc(struct mtd_info *mtd) 1133 int mxs_nand_setup_ecc(struct mtd_info *mtd)
1134 { 1134 {
1135 struct nand_chip *nand = mtd_to_nand(mtd); 1135 struct nand_chip *nand = mtd_to_nand(mtd);
1136 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1136 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1137 struct bch_geometry *geo = &nand_info->bch_geometry; 1137 struct bch_geometry *geo = &nand_info->bch_geometry;
1138 struct mxs_bch_regs *bch_regs = nand_info->bch_regs; 1138 struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
1139 uint32_t tmp; 1139 uint32_t tmp;
1140 int ret; 1140 int ret;
1141 1141
1142 nand_info->en_randomizer = 0; 1142 nand_info->en_randomizer = 0;
1143 nand_info->oobsize = mtd->oobsize; 1143 nand_info->oobsize = mtd->oobsize;
1144 nand_info->writesize = mtd->writesize; 1144 nand_info->writesize = mtd->writesize;
1145 1145
1146 ret = mxs_nand_set_geometry(mtd, geo); 1146 ret = mxs_nand_set_geometry(mtd, geo);
1147 if (ret) 1147 if (ret)
1148 return ret; 1148 return ret;
1149 1149
1150 /* Configure BCH and set NFC geometry */ 1150 /* Configure BCH and set NFC geometry */
1151 mxs_reset_block(&bch_regs->hw_bch_ctrl_reg); 1151 mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);
1152 1152
1153 /* Configure layout 0 */ 1153 /* Configure layout 0 */
1154 tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; 1154 tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1155 tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET; 1155 tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1156 tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET; 1156 tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
1157 tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT; 1157 tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
1158 tmp |= (geo->gf_len == 14 ? 1 : 0) << 1158 tmp |= (geo->gf_len == 14 ? 1 : 0) <<
1159 BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET; 1159 BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1160 writel(tmp, &bch_regs->hw_bch_flash0layout0); 1160 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1161 nand_info->bch_flash0layout0 = tmp; 1161 nand_info->bch_flash0layout0 = tmp;
1162 1162
1163 tmp = (mtd->writesize + mtd->oobsize) 1163 tmp = (mtd->writesize + mtd->oobsize)
1164 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET; 1164 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
1165 tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET; 1165 tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
1166 tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT; 1166 tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
1167 tmp |= (geo->gf_len == 14 ? 1 : 0) << 1167 tmp |= (geo->gf_len == 14 ? 1 : 0) <<
1168 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; 1168 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1169 writel(tmp, &bch_regs->hw_bch_flash0layout1); 1169 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1170 nand_info->bch_flash0layout1 = tmp; 1170 nand_info->bch_flash0layout1 = tmp;
1171 1171
1172 /* Set erase threshold to ECC strength for mx6qp, mx6ul, mx7, imx8 and imx8m */ 1172 /* Set erase threshold to ECC strength for mx6qp, mx6ul, mx7, imx8 and imx8m */
1173 if (is_mx6dqp() || is_mx7() || 1173 if (is_mx6dqp() || is_mx7() ||
1174 is_mx6ul() || is_imx8() || is_imx8m()) 1174 is_mx6ul() || is_imx8() || is_imx8m())
1175 writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength), 1175 writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
1176 &bch_regs->hw_bch_mode); 1176 &bch_regs->hw_bch_mode);
1177 1177
1178 /* Set *all* chip selects to use layout 0 */ 1178 /* Set *all* chip selects to use layout 0 */
1179 writel(0, &bch_regs->hw_bch_layoutselect); 1179 writel(0, &bch_regs->hw_bch_layoutselect);
1180 1180
1181 /* Enable BCH complete interrupt */ 1181 /* Enable BCH complete interrupt */
1182 writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set); 1182 writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);
1183 1183
1184 /* Hook some operations at the MTD level. */ 1184 /* Hook some operations at the MTD level. */
1185 if (mtd->_read_oob != mxs_nand_hook_read_oob) { 1185 if (mtd->_read_oob != mxs_nand_hook_read_oob) {
1186 nand_info->hooked_read_oob = mtd->_read_oob; 1186 nand_info->hooked_read_oob = mtd->_read_oob;
1187 mtd->_read_oob = mxs_nand_hook_read_oob; 1187 mtd->_read_oob = mxs_nand_hook_read_oob;
1188 } 1188 }
1189 1189
1190 if (mtd->_write_oob != mxs_nand_hook_write_oob) { 1190 if (mtd->_write_oob != mxs_nand_hook_write_oob) {
1191 nand_info->hooked_write_oob = mtd->_write_oob; 1191 nand_info->hooked_write_oob = mtd->_write_oob;
1192 mtd->_write_oob = mxs_nand_hook_write_oob; 1192 mtd->_write_oob = mxs_nand_hook_write_oob;
1193 } 1193 }
1194 1194
1195 if (mtd->_block_markbad != mxs_nand_hook_block_markbad) { 1195 if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
1196 nand_info->hooked_block_markbad = mtd->_block_markbad; 1196 nand_info->hooked_block_markbad = mtd->_block_markbad;
1197 mtd->_block_markbad = mxs_nand_hook_block_markbad; 1197 mtd->_block_markbad = mxs_nand_hook_block_markbad;
1198 } 1198 }
1199 1199
1200 return 0; 1200 return 0;
1201 } 1201 }
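
A worked example of the ECC field encoding programmed above, using an illustrative strength rather than any specific chip:

    /* The ECC0/ECCN fields store half the ECC strength. For a strength
     * of 62 bits (the maximum this driver allows on i.MX6SX/7/8/8M):
     *
     *     62 >> 1 == 0x1F
     *
     * which is exactly the constant mxs_nand_mode_fcb() writes below. */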
1202 1202
1203 /* 1203 /*
1204 * Allocate DMA buffers 1204 * Allocate DMA buffers
1205 */ 1205 */
1206 int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info) 1206 int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
1207 { 1207 {
1208 uint8_t *buf; 1208 uint8_t *buf;
1209 const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE; 1209 const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
1210 1210
1211 nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT); 1211 nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);
1212 1212
1213 /* DMA buffers */ 1213 /* DMA buffers */
1214 buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size); 1214 buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
1215 if (!buf) { 1215 if (!buf) {
1216 printf("MXS NAND: Error allocating DMA buffers\n"); 1216 printf("MXS NAND: Error allocating DMA buffers\n");
1217 return -ENOMEM; 1217 return -ENOMEM;
1218 } 1218 }
1219 1219
1220 memset(buf, 0, nand_info->data_buf_size); 1220 memset(buf, 0, nand_info->data_buf_size);
1221 1221
1222 nand_info->data_buf = buf; 1222 nand_info->data_buf = buf;
1223 nand_info->oob_buf = buf + NAND_MAX_PAGESIZE; 1223 nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
1224 /* Command buffers */ 1224 /* Command buffers */
1225 nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT, 1225 nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
1226 MXS_NAND_COMMAND_BUFFER_SIZE); 1226 MXS_NAND_COMMAND_BUFFER_SIZE);
1227 if (!nand_info->cmd_buf) { 1227 if (!nand_info->cmd_buf) {
1228 free(buf); 1228 free(buf);
1229 printf("MXS NAND: Error allocating command buffers\n"); 1229 printf("MXS NAND: Error allocating command buffers\n");
1230 return -ENOMEM; 1230 return -ENOMEM;
1231 } 1231 }
1232 memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE); 1232 memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
1233 nand_info->cmd_queue_len = 0; 1233 nand_info->cmd_queue_len = 0;
1234 1234
1235 return 0; 1235 return 0;
1236 } 1236 }
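
The resulting layout of the single data allocation, sketched from the assignments above:

    /*
     *     data_buf                            oob_buf
     *     |<------ NAND_MAX_PAGESIZE ------->|<--- NAND_MAX_OOBSIZE --->|
     *
     * The total size is rounded up to MXS_DMA_ALIGNMENT so that cache
     * flushes and invalidates on the buffer never share a cache line
     * with unrelated data. */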
1237 1237
1238 /* 1238 /*
1239 * Initializes the NFC hardware. 1239 * Initializes the NFC hardware.
1240 */ 1240 */
1241 static int mxs_nand_init_dma(struct mxs_nand_info *info) 1241 static int mxs_nand_init_dma(struct mxs_nand_info *info)
1242 { 1242 {
1243 int i = 0, j, ret = 0; 1243 int i = 0, j, ret = 0;
1244 1244
1245 #ifdef CONFIG_MX6 1245 #ifdef CONFIG_MX6
1246 if (check_module_fused(MX6_MODULE_GPMI)) { 1246 if (check_module_fused(MX6_MODULE_GPMI)) {
1247 printf("NAND GPMI@0x%x is fused, disable it\n", (u32)info->gpmi_regs); 1247 printf("NAND GPMI@0x%x is fused, disable it\n", (u32)info->gpmi_regs);
1248 return -EPERM; 1248 return -EPERM;
1249 } 1249 }
1250 #endif 1250 #endif
1251 1251
1252 info->desc = malloc(sizeof(struct mxs_dma_desc *) * 1252 info->desc = malloc(sizeof(struct mxs_dma_desc *) *
1253 MXS_NAND_DMA_DESCRIPTOR_COUNT); 1253 MXS_NAND_DMA_DESCRIPTOR_COUNT);
1254 if (!info->desc) { 1254 if (!info->desc) {
1255 ret = -ENOMEM; 1255 ret = -ENOMEM;
1256 goto err1; 1256 goto err1;
1257 } 1257 }
1258 1258
1259 /* Allocate the DMA descriptors. */ 1259 /* Allocate the DMA descriptors. */
1260 for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) { 1260 for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
1261 info->desc[i] = mxs_dma_desc_alloc(); 1261 info->desc[i] = mxs_dma_desc_alloc();
1262 if (!info->desc[i]) { 1262 if (!info->desc[i]) {
1263 ret = -ENOMEM; 1263 ret = -ENOMEM;
1264 goto err2; 1264 goto err2;
1265 } 1265 }
1266 } 1266 }
1267 1267
1268 /* Init the DMA controller. */ 1268 /* Init the DMA controller. */
1269 mxs_dma_init(); 1269 mxs_dma_init();
1270 for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0; 1270 for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
1271 j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) { 1271 j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
1272 ret = mxs_dma_init_channel(j); 1272 ret = mxs_dma_init_channel(j);
1273 if (ret) 1273 if (ret)
1274 goto err3; 1274 goto err3;
1275 } 1275 }
1276 1276
1277 /* Reset the GPMI block. */ 1277 /* Reset the GPMI block. */
1278 mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg); 1278 mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
1279 mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg); 1279 mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);
1280 1280
1281 /* 1281 /*
1282 * Choose NAND mode, set IRQ polarity, disable write protection and 1282 * Choose NAND mode, set IRQ polarity, disable write protection and
1283 * select BCH ECC. 1283 * select BCH ECC.
1284 */ 1284 */
1285 clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1, 1285 clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
1286 GPMI_CTRL1_GPMI_MODE, 1286 GPMI_CTRL1_GPMI_MODE,
1287 GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET | 1287 GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
1288 GPMI_CTRL1_BCH_MODE); 1288 GPMI_CTRL1_BCH_MODE);
1289 1289
1290 return 0; 1290 return 0;
1291 1291
1292 err3: 1292 err3:
1293 for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--) 1293 for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
1294 mxs_dma_release(j); 1294 mxs_dma_release(j);
1295 err2: 1295 err2:
1296 for (--i; i >= 0; i--) 1296 for (--i; i >= 0; i--)
1297 mxs_dma_desc_free(info->desc[i]); 1297 mxs_dma_desc_free(info->desc[i]);
1298 free(info->desc); 1298 free(info->desc);
1299 err1: 1299 err1:
1300 if (ret == -ENOMEM) 1300 if (ret == -ENOMEM)
1301 printf("MXS NAND: Unable to allocate DMA descriptors\n"); 1301 printf("MXS NAND: Unable to allocate DMA descriptors\n");
1302 return ret; 1302 return ret;
1303 } 1303 }
1304 1304
1305 int mxs_nand_init_spl(struct nand_chip *nand) 1305 int mxs_nand_init_spl(struct nand_chip *nand)
1306 { 1306 {
1307 struct mxs_nand_info *nand_info; 1307 struct mxs_nand_info *nand_info;
1308 int err; 1308 int err;
1309 1309
1310 nand_info = malloc(sizeof(struct mxs_nand_info)); 1310 nand_info = malloc(sizeof(struct mxs_nand_info));
1311 if (!nand_info) { 1311 if (!nand_info) {
1312 printf("MXS NAND: Failed to allocate private data\n"); 1312 printf("MXS NAND: Failed to allocate private data\n");
1313 return -ENOMEM; 1313 return -ENOMEM;
1314 } 1314 }
1315 memset(nand_info, 0, sizeof(struct mxs_nand_info)); 1315 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1316 1316
1317 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE; 1317 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1318 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1318 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1319 1319
1320 if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m()) 1320 if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
1321 nand_info->max_ecc_strength_supported = 62; 1321 nand_info->max_ecc_strength_supported = 62;
1322 else 1322 else
1323 nand_info->max_ecc_strength_supported = 40; 1323 nand_info->max_ecc_strength_supported = 40;
1324 1324
1325 err = mxs_nand_alloc_buffers(nand_info); 1325 err = mxs_nand_alloc_buffers(nand_info);
1326 if (err) 1326 if (err)
1327 return err; 1327 return err;
1328 1328
1329 err = mxs_nand_init_dma(nand_info); 1329 err = mxs_nand_init_dma(nand_info);
1330 if (err) 1330 if (err)
1331 return err; 1331 return err;
1332 1332
1333 nand_set_controller_data(nand, nand_info); 1333 nand_set_controller_data(nand, nand_info);
1334 1334
1335 nand->options |= NAND_NO_SUBPAGE_WRITE; 1335 nand->options |= NAND_NO_SUBPAGE_WRITE;
1336 1336
1337 nand->cmd_ctrl = mxs_nand_cmd_ctrl; 1337 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1338 nand->dev_ready = mxs_nand_device_ready; 1338 nand->dev_ready = mxs_nand_device_ready;
1339 nand->select_chip = mxs_nand_select_chip; 1339 nand->select_chip = mxs_nand_select_chip;
1340 1340
1341 nand->read_byte = mxs_nand_read_byte; 1341 nand->read_byte = mxs_nand_read_byte;
1342 nand->read_buf = mxs_nand_read_buf; 1342 nand->read_buf = mxs_nand_read_buf;
1343 1343
1344 nand->ecc.read_page = mxs_nand_ecc_read_page; 1344 nand->ecc.read_page = mxs_nand_ecc_read_page;
1345 1345
1346 nand->ecc.mode = NAND_ECC_HW; 1346 nand->ecc.mode = NAND_ECC_HW;
1347 1347
1348 return 0; 1348 return 0;
1349 } 1349 }
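
A hedged sketch of an SPL-side caller of this entry point; the surrounding loader code is assumed, not quoted from U-Boot:

    /* Sketch: bring up the controller in SPL, then read pages through
     * the chip's ECC hook. The static nand_chip is illustrative. */
    static struct nand_chip nand_chip;

    static int example_spl_nand_setup(void)
    {
            int err = mxs_nand_init_spl(&nand_chip);

            if (err)
                    return err;

            /* The SPL loader can now use nand_chip.ecc.read_page
             * (mxs_nand_ecc_read_page) to fetch its payload. */
            return 0;
    }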
1350 1350
1351 int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info) 1351 int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
1352 { 1352 {
1353 struct mtd_info *mtd; 1353 struct mtd_info *mtd;
1354 struct nand_chip *nand; 1354 struct nand_chip *nand;
1355 int err; 1355 int err;
1356 1356
1357 nand = &nand_info->chip; 1357 nand = &nand_info->chip;
1358 mtd = nand_to_mtd(nand); 1358 mtd = nand_to_mtd(nand);
1359 err = mxs_nand_alloc_buffers(nand_info); 1359 err = mxs_nand_alloc_buffers(nand_info);
1360 if (err) 1360 if (err)
1361 return err; 1361 return err;
1362 1362
1363 err = mxs_nand_init_dma(nand_info); 1363 err = mxs_nand_init_dma(nand_info);
1364 if (err) 1364 if (err)
1365 goto err_free_buffers; 1365 goto err_free_buffers;
1366 1366
1367 memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout)); 1367 memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));
1368 1368
1369 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT 1369 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1370 nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; 1370 nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
1371 #endif 1371 #endif
1372 1372
1373 nand_set_controller_data(nand, nand_info); 1373 nand_set_controller_data(nand, nand_info);
1374 nand->options |= NAND_NO_SUBPAGE_WRITE; 1374 nand->options |= NAND_NO_SUBPAGE_WRITE;
1375 1375
1376 if (nand_info->dev) 1376 if (nand_info->dev)
1377 nand->flash_node = dev_of_offset(nand_info->dev); 1377 nand->flash_node = dev_of_offset(nand_info->dev);
1378 1378
1379 nand->cmd_ctrl = mxs_nand_cmd_ctrl; 1379 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1380 1380
1381 nand->dev_ready = mxs_nand_device_ready; 1381 nand->dev_ready = mxs_nand_device_ready;
1382 nand->select_chip = mxs_nand_select_chip; 1382 nand->select_chip = mxs_nand_select_chip;
1383 nand->block_bad = mxs_nand_block_bad; 1383 nand->block_bad = mxs_nand_block_bad;
1384 1384
1385 nand->read_byte = mxs_nand_read_byte; 1385 nand->read_byte = mxs_nand_read_byte;
1386 1386
1387 nand->read_buf = mxs_nand_read_buf; 1387 nand->read_buf = mxs_nand_read_buf;
1388 nand->write_buf = mxs_nand_write_buf; 1388 nand->write_buf = mxs_nand_write_buf;
1389 1389
1390 /* first scan to find the device and get the page size */ 1390 /* first scan to find the device and get the page size */
1391 if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL)) 1391 if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
1392 goto err_free_buffers; 1392 goto err_free_buffers;
1393 1393
1394 if (mxs_nand_setup_ecc(mtd)) 1394 if (mxs_nand_setup_ecc(mtd))
1395 goto err_free_buffers; 1395 goto err_free_buffers;
1396 1396
1397 nand->ecc.read_page = mxs_nand_ecc_read_page; 1397 nand->ecc.read_page = mxs_nand_ecc_read_page;
1398 nand->ecc.write_page = mxs_nand_ecc_write_page; 1398 nand->ecc.write_page = mxs_nand_ecc_write_page;
1399 nand->ecc.read_oob = mxs_nand_ecc_read_oob; 1399 nand->ecc.read_oob = mxs_nand_ecc_read_oob;
1400 nand->ecc.write_oob = mxs_nand_ecc_write_oob; 1400 nand->ecc.write_oob = mxs_nand_ecc_write_oob;
1401 1401
1402 nand->ecc.layout = &fake_ecc_layout; 1402 nand->ecc.layout = &fake_ecc_layout;
1403 nand->ecc.mode = NAND_ECC_HW; 1403 nand->ecc.mode = NAND_ECC_HW;
1404 nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size; 1404 nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
1405 nand->ecc.strength = nand_info->bch_geometry.ecc_strength; 1405 nand->ecc.strength = nand_info->bch_geometry.ecc_strength;
1406 1406
1407 /* second phase scan */ 1407 /* second phase scan */
1408 err = nand_scan_tail(mtd); 1408 err = nand_scan_tail(mtd);
1409 if (err) 1409 if (err)
1410 goto err_free_buffers; 1410 goto err_free_buffers;
1411 1411
1412 err = nand_register(0, mtd); 1412 err = nand_register(0, mtd);
1413 if (err) 1413 if (err)
1414 goto err_free_buffers; 1414 goto err_free_buffers;
1415 1415
1416 return 0; 1416 return 0;
1417 1417
1418 err_free_buffers: 1418 err_free_buffers:
1419 free(nand_info->data_buf); 1419 free(nand_info->data_buf);
1420 free(nand_info->cmd_buf); 1420 free(nand_info->cmd_buf);
1421 1421
1422 return err; 1422 return err;
1423 } 1423 }
1424 1424
1425 #ifndef CONFIG_NAND_MXS_DT 1425 #ifndef CONFIG_NAND_MXS_DT
1426 void board_nand_init(void) 1426 void board_nand_init(void)
1427 { 1427 {
1428 struct mxs_nand_info *nand_info; 1428 struct mxs_nand_info *nand_info;
1429 1429
1430 nand_info = malloc(sizeof(struct mxs_nand_info)); 1430 nand_info = malloc(sizeof(struct mxs_nand_info));
1431 if (!nand_info) { 1431 if (!nand_info) {
1432 printf("MXS NAND: Failed to allocate private data\n"); 1432 printf("MXS NAND: Failed to allocate private data\n");
1433 return; 1433 return;
1434 } 1434 }
1435 memset(nand_info, 0, sizeof(struct mxs_nand_info)); 1435 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1436 1436
1437 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE; 1437 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1438 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1438 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1439 1439
1440 /* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */ 1440 /* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
1441 if (is_mx6sx() || is_mx7()) 1441 if (is_mx6sx() || is_mx7())
1442 nand_info->max_ecc_strength_supported = 62; 1442 nand_info->max_ecc_strength_supported = 62;
1443 else 1443 else
1444 nand_info->max_ecc_strength_supported = 40; 1444 nand_info->max_ecc_strength_supported = 40;
1445 1445
1446 #ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC 1446 #ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
1447 nand_info->use_minimum_ecc = true; 1447 nand_info->use_minimum_ecc = true;
1448 #endif 1448 #endif
1449 1449
1450 if (mxs_nand_init_ctrl(nand_info) < 0) 1450 if (mxs_nand_init_ctrl(nand_info) < 0)
1451 goto err; 1451 goto err;
1452 1452
1453 return; 1453 return;
1454 1454
1455 err: 1455 err:
1456 free(nand_info); 1456 free(nand_info);
1457 } 1457 }
1458 #endif 1458 #endif
1459 1459
1460 #if CONFIG_IS_ENABLED(MX7) || CONFIG_IS_ENABLED(MX6) 1460 #if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8M)
1461 /* 1461 /*
1462 * Read NAND layout for FCB block generation. 1462 * Read NAND layout for FCB block generation.
1463 */ 1463 */
1464 void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l) 1464 void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
1465 { 1465 {
1466 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1466 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1467 u32 tmp; 1467 u32 tmp;
1468 1468
1469 tmp = readl(&bch_regs->hw_bch_flash0layout0); 1469 tmp = readl(&bch_regs->hw_bch_flash0layout0);
1470 l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >> 1470 l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
1471 BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; 1471 BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1472 l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >> 1472 l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
1473 BCH_FLASHLAYOUT0_META_SIZE_OFFSET; 1473 BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1474 1474
1475 l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >> 1475 l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
1476 BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET); 1476 BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
1477 l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >> 1477 l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
1478 BCH_FLASHLAYOUT0_ECC0_OFFSET; 1478 BCH_FLASHLAYOUT0_ECC0_OFFSET;
1479 tmp = readl(&bch_regs->hw_bch_flash0layout1); 1479 tmp = readl(&bch_regs->hw_bch_flash0layout1);
1480 l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >> 1480 l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
1481 BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET); 1481 BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
1482 l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >> 1482 l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
1483 BCH_FLASHLAYOUT1_ECCN_OFFSET; 1483 BCH_FLASHLAYOUT1_ECCN_OFFSET;
1484 l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >> 1484 l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
1485 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; 1485 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1486 } 1486 }
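
A hypothetical caller of the helper above; U-Boot's FCB tooling reads the layout back in a similar way, but this dump function is only for illustration:

    /* Sketch: read back and print the BCH geometry currently programmed. */
    static void example_dump_bch_layout(struct mtd_info *mtd)
    {
            struct mxs_nand_layout l;

            mxs_nand_get_layout(mtd, &l);
            printf("nblocks %u meta %u data0 %u ecc0 %u datan %u eccn %u gf %u\n",
                   l.nblocks, l.meta_size, l.data0_size, l.ecc0,
                   l.datan_size, l.eccn, l.gf_len);
    }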
1487 1487
1488 /* 1488 /*
1489 * Set BCH to specific layout used by ROM bootloader to read FCB. 1489 * Set BCH to specific layout used by ROM bootloader to read FCB.
1490 */ 1490 */
1491 void mxs_nand_mode_fcb(struct mtd_info *mtd) 1491 void mxs_nand_mode_fcb(struct mtd_info *mtd)
1492 { 1492 {
1493 u32 tmp; 1493 u32 tmp;
1494 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1494 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1495 struct nand_chip *nand = mtd_to_nand(mtd); 1495 struct nand_chip *nand = mtd_to_nand(mtd);
1496 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1496 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1497 1497
1498 nand_info->en_randomizer = 1; 1498 nand_info->en_randomizer = 1;
1499 1499
1500 mtd->writesize = 1024; 1500 mtd->writesize = 1024;
1501 mtd->oobsize = 1862 - 1024; 1501 mtd->oobsize = 1862 - 1024;
1502 1502
1503 /* 8 ECC chunks */ 1503 /* 8 ECC chunks */
1504 tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; 1504 tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1505 /* 32 bytes for metadata */ 1505 /* 32 bytes for metadata */
1506 tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET; 1506 tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1507 /* ECC level 62: the field stores strength/2, i.e. 0x1F */ 1507 /* ECC level 62: the field stores strength/2, i.e. 0x1F */
1508 tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET; 1508 tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
1509 /* data0 block: 0x20 4-byte units (128 bytes) */ 1509 /* data0 block: 0x20 4-byte units (128 bytes) */
1510 tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET; 1510 tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
1511 tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET; 1511 tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1512 writel(tmp, &bch_regs->hw_bch_flash0layout0); 1512 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1513 1513
1514 /* 1024 for data + 838 for OOB */ 1514 /* 1024 for data + 838 for OOB */
1515 tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET; 1515 tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
1516 /* ECC level 62: the field stores strength/2, i.e. 0x1F */ 1516 /* ECC level 62: the field stores strength/2, i.e. 0x1F */
1517 tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET; 1517 tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
1518 /* dataN block: 0x20 4-byte units (128 bytes) */ 1518 /* dataN block: 0x20 4-byte units (128 bytes) */
1519 tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET; 1519 tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
1520 tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; 1520 tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1521 writel(tmp, &bch_regs->hw_bch_flash0layout1); 1521 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1522 } 1522 }
1523 1523
1524 /* 1524 /*
1525 * Restore BCH to normal settings. 1525 * Restore BCH to normal settings.
1526 */ 1526 */
1527 void mxs_nand_mode_normal(struct mtd_info *mtd) 1527 void mxs_nand_mode_normal(struct mtd_info *mtd)
1528 { 1528 {
1529 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1529 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1530 struct nand_chip *nand = mtd_to_nand(mtd); 1530 struct nand_chip *nand = mtd_to_nand(mtd);
1531 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1531 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1532 1532
1533 nand_info->en_randomizer = 0; 1533 nand_info->en_randomizer = 0;
1534 1534
1535 mtd->writesize = nand_info->writesize; 1535 mtd->writesize = nand_info->writesize;
1536 mtd->oobsize = nand_info->oobsize; 1536 mtd->oobsize = nand_info->oobsize;
1537 1537
1538 writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0); 1538 writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
1539 writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1); 1539 writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
1540 } 1540 }
1541 1541
1542 uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd) 1542 uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
1543 { 1543 {
1544 struct nand_chip *chip = mtd_to_nand(mtd); 1544 struct nand_chip *chip = mtd_to_nand(mtd);
1545 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 1545 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1546 struct bch_geometry *geo = &nand_info->bch_geometry; 1546 struct bch_geometry *geo = &nand_info->bch_geometry;
1547 1547
1548 return geo->block_mark_byte_offset; 1548 return geo->block_mark_byte_offset;
1549 } 1549 }
1550 1550
1551 uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd) 1551 uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
1552 { 1552 {
1553 struct nand_chip *chip = mtd_to_nand(mtd); 1553 struct nand_chip *chip = mtd_to_nand(mtd);
1554 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 1554 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1555 struct bch_geometry *geo = &nand_info->bch_geometry; 1555 struct bch_geometry *geo = &nand_info->bch_geometry;
1556 1556
1557 return geo->block_mark_bit_offset; 1557 return geo->block_mark_bit_offset;
1558 } 1558 }
1559 #endif /* CONFIG_MX6 || CONFIG_MX7 || CONFIG_IMX8M */ 1559 #endif /* CONFIG_MX6 || CONFIG_MX7 || CONFIG_IMX8M */
1560 1560