Commit b01b7e1d591de90be780449b26ff7e542dd545e9

Authored by Alice Guo
Committed by Ye Li
1 parent 0e90d90944

MLK-22582-1: nand: enable the Randomizer module for mx7 and mx8 when calling mxs_nand_ecc_read_page

To enable the Randomizer module, set GPMI_ECCCTRL[RANDOMIZER_ENABLE] to 1,
then write the number of the page that needs to be randomized to
GPMI_ECCCOUNT[RANDOMIZER_PAGE].

Signed-off-by: Alice Guo <alice.guo@nxp.com>
(cherry picked from commit e8271a1c7621cc3607d3e9c7b0a872342b5f4c95)
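
The hunk below programs both fields through the PIO words of the read DMA
descriptor. As a minimal sketch of the two writes the message describes
(macros and pio_words indices taken from the diff itself):

	/* GPMI_ECCCTRL[RANDOMIZER_ENABLE] = 1, type-2 randomizer */
	d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
			       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
	/*
	 * GPMI_ECCCOUNT[RANDOMIZER_PAGE]: the page number to randomize,
	 * 0-255; the << 16 shift places it in the RANDOMIZER_PAGE field.
	 */
	d->cmd.pio_words[3] |= (page % 256) << 16;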

Showing 1 changed file with 8 additions and 2 deletions

drivers/mtd/nand/raw/mxs_nand.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 * Copyright 2017-2019 NXP
 */

#include <common.h>
#include <dm.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <asm/arch/sys_proto.h>
#include <mxs_nand.h>

#define MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if (defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || defined(CONFIG_IMX8M))
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
#define MXS_NAND_METADATA_SIZE			10
#define MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define MXS_NAND_BCH_TIMEOUT			10000

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#ifndef CONFIG_SYS_DCACHE_OFF
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}

static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo, struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(this->dev, "The size of chunk0 must equal to chunkn\n");
		return false;
	}

	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
	    (geo->gf_len * geo->ecc_strength +
	     geo->ecc_chunkn_size * 8);

	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
	    (geo->gf_len * geo->ecc_strength +
	     geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i+1;
		dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}

static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the C >= O */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* sanity check for the minimum ecc nand required */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* calculate the maximum ecc platform can support */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);


	/* search a supported ecc strength that makes bbm */
	/* located in data chunk */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/* if none of them works, keep using the minimum ecc */
	/* nand required but changing ecc page layout */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* add extra ecc for meta data */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* check if oob can afford this extra ecc chunk */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength
		    * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* calculate in which chunk bbm located */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			     (geo->gf_len * geo->ecc_strength +
			      geo->ecc_chunkn_size * 8) + 1;
	}

	/* calculate the number of ecc chunk behind the bbm */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

/*
 * Wait for BCH complete IRQ and clear the IRQ
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
				BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;
	mxs_nand_read_buf(mtd, &buf, 1);
	return buf;
}

static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 uint8_t *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	uint32_t *dma_buf = (uint32_t *)buf;
	int i;

	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;

	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n", page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;
	int flag = 0;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

+	if ((is_mx7() || is_imx8m()) && nand_info->en_randomizer) {
+		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
+				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
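+		/*
+		 * As on the write path in mxs_nand_ecc_write_page() below:
+		 * the page number to be randomized (0-255) goes into the
+		 * RANDOMIZER_PAGE field of GPMI_ECCCOUNT via pio_words[3].
+		 */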
+		d->cmd.pio_words[3] |= (page % 256) << 16;
+	}
+
	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul()
			     || is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}

/*
 * Write a page to NAND.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				   struct nand_chip *nand, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

-	if ((is_mx7() && nand_info->en_randomizer) || (is_imx8m() && nand_info->en_randomizer)) {
+	if ((is_mx7() || is_imx8m()) && nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/*
		 * Write NAND page number needed to be randomized
		 * to GPMI_ECCCOUNT register.
		 *
		 * The value is between 0-255. For additional details
		 * check 9.6.6.4 of i.MX7D Applications Processor reference
		 */
-		d->cmd.pio_words[3] |= (page % 255) << 16;
+		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return 0;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
989 * page, using the conventional definition of which bytes are data and which 995 * page, using the conventional definition of which bytes are data and which
990 * are OOB. This gives the caller a way to see the actual, physical bytes 996 * are OOB. This gives the caller a way to see the actual, physical bytes
991 * in the page, without the distortions applied by our ECC engine. 997 * in the page, without the distortions applied by our ECC engine.
992 * 998 *
 * What we do for this specific read operation depends on whether we're doing
 * a "raw" read or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}
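
/*
 * Usage sketch (editor's illustration, not part of the driver): the raw/ECC
 * distinction above is driven entirely by the ops->mode the caller passes to
 * mtd_read_oob(). Something like:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_RAW,		// or MTD_OPS_PLACE_OOB
 *		.ooblen = mtd->oobsize,
 *		.oobbuf = buf,
 *	};
 *	mtd_read_oob(mtd, page_addr, &ops);
 *
 * With MTD_OPS_RAW, mxs_nand_hook_read_oob() sets raw_oob_mode and
 * mxs_nand_ecc_read_oob() returns the physical OOB bytes; with any other
 * mode it returns 0xff-filled OOB plus the real block-mark byte.
 */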

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
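
/*
 * Flow sketch (editor's illustration): the only supported OOB write is the
 * bad-block mark, reached through the hook installed in mxs_nand_setup_ecc():
 *
 *	mtd_block_markbad(mtd, ofs)
 *	  -> mxs_nand_hook_block_markbad()      // sets marking_block_bad = 1
 *	    -> ... -> mxs_nand_ecc_write_oob()  // writes the 0x00 mark byte
 *
 * Any other OOB write arrives with marking_block_bad == 0 and fails with
 * -EIO above.
 */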

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
		printf("unsupported NAND chip, minimum ECC strength required: %d\n",
		       chip->ecc_strength_ds);
		return -EINVAL;
	}

	if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) &&
	     (mtd->oobsize < 1024)) || nand_info->legacy_bch_geometry) {
		dev_warn(nand_info->dev, "use legacy bch geometry\n");
		return mxs_nand_legacy_calc_ecc_layout(geo, mtd);
	}

	if (mtd->oobsize > 1024 || chip->ecc_step_ds < mtd->oobsize)
		return mxs_nand_calc_ecc_for_large_oob(geo, mtd);

	return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
				chip->ecc_strength_ds, chip->ecc_step_ds);
}
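
/*
 * Worked example (editor's sketch, chip values assumed): for a typical
 * 4 KiB page / 256 B OOB chip reporting ecc_strength_ds = 8 and
 * ecc_step_ds = 512:
 *   - 8 <= max_ecc_strength_supported, so the chip is accepted;
 *   - the datasheet values are valid, so (assuming legacy_bch_geometry
 *     is not set) the legacy path is skipped;
 *   - oobsize (256) <= 1024 and ecc_step_ds (512) >= oobsize, so the
 *     large-OOB path is skipped;
 *   - the layout is therefore computed by
 *     mxs_nand_calc_ecc_layout_by_info(geo, mtd, 8, 512).
 */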

/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;

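	/*
	 * Worked example (editor's sketch, geometry values assumed): for a
	 * 4096+224 page split into eight 512-byte chunks with ECC strength 16
	 * and GF(2^13):
	 *   FLASH0LAYOUT0: NBLOCKS = 8 - 1 = 7, META_SIZE = 10,
	 *                  ECC0 = 16 >> 1 = 8,
	 *                  DATA0_SIZE = 512 >> 2 = 128 (i.MX6/7/8 encode
	 *                  chunk sizes in 4-byte units);
	 *   FLASH0LAYOUT1: PAGE_SIZE = 4096 + 224 = 4320, ECCN = 8,
	 *                  DATAN_SIZE = 128, GF13_0_GF14_1 = 0.
	 * Both raw values are also cached in nand_info so that
	 * mxs_nand_mode_normal() can restore them after FCB accesses.
	 */
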
	/* Set erase threshold to ECC strength for i.MX6QP/UL, i.MX7 and i.MX8/8M */
	if (is_mx6dqp() || is_mx7() ||
	    is_mx6ul() || is_imx8() || is_imx8m())
		writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
		       &bch_regs->hw_bch_mode);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	return 0;
}

/*
 * Allocate DMA buffers
 */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffers */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
	/* Command buffers */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				      MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}
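
/*
 * Buffer layout sketch (editor's illustration): a single DMA-aligned
 * allocation is carved into the page-data and OOB areas, so one flush or
 * invalidate of data_buf_size bytes covers both:
 *
 *	data_buf                          oob_buf
 *	|<------ NAND_MAX_PAGESIZE ------>|<-- NAND_MAX_OOBSIZE -->| pad
 *	|<------------ roundup(size, MXS_DMA_ALIGNMENT) -------------->|
 */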

/*
 * Initializes the NFC hardware.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

#ifdef CONFIG_MX6
	if (check_module_fused(MX6_MODULE_GPMI)) {
		printf("NAND GPMI@0x%x is fused, disable it\n", (u32)info->gpmi_regs);
		return -EPERM;
	}
#endif

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
			    MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
	     j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}

int mxs_nand_init_spl(struct nand_chip *nand)
{
	struct mxs_nand_info *nand_info;
	int err;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return -ENOMEM;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		return err;

	nand_set_controller_data(nand, nand_info);

	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;
	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;

	nand->read_byte = mxs_nand_read_byte;
	nand->read_buf = mxs_nand_read_buf;

	nand->ecc.read_page = mxs_nand_ecc_read_page;

	nand->ecc.mode = NAND_ECC_HW;

	return 0;
}
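
/*
 * Usage sketch (editor's illustration, assuming an SPL front end such as
 * mxs_nand_spl): only the read path is wired up here, so a minimal boot
 * flow looks like
 *
 *	struct nand_chip chip;
 *
 *	mxs_nand_init_spl(&chip);	// registers read_page + HW ECC only
 *	// ... identify the chip, then read the boot payload through
 *	// chip.ecc.read_page(); writes and OOB hooks are full-driver only.
 */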

int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
{
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int err;

	nand = &nand_info->chip;
	mtd = nand_to_mtd(nand);
	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		goto err_free_buffers;

	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
#endif

	nand_set_controller_data(nand, nand_info);
	nand->options |= NAND_NO_SUBPAGE_WRITE;

	if (nand_info->dev)
		nand->flash_node = dev_of_offset(nand_info->dev);

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;

	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;
	nand->block_bad = mxs_nand_block_bad;

	nand->read_byte = mxs_nand_read_byte;

	nand->read_buf = mxs_nand_read_buf;
	nand->write_buf = mxs_nand_write_buf;

	/* first scan to find the device and get the page size */
	err = nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL);
	if (err)
		goto err_free_buffers;

	err = mxs_nand_setup_ecc(mtd);
	if (err)
		goto err_free_buffers;

	nand->ecc.read_page = mxs_nand_ecc_read_page;
	nand->ecc.write_page = mxs_nand_ecc_write_page;
	nand->ecc.read_oob = mxs_nand_ecc_read_oob;
	nand->ecc.write_oob = mxs_nand_ecc_write_oob;

	nand->ecc.layout = &fake_ecc_layout;
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
	nand->ecc.strength = nand_info->bch_geometry.ecc_strength;

	/* second phase scan */
	err = nand_scan_tail(mtd);
	if (err)
		goto err_free_buffers;

	err = nand_register(0, mtd);
	if (err)
		goto err_free_buffers;

	return 0;

err_free_buffers:
	free(nand_info->data_buf);
	free(nand_info->cmd_buf);

	return err;
}

#ifndef CONFIG_NAND_MXS_DT
void board_nand_init(void)
{
	struct mxs_nand_info *nand_info;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	/* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
	if (is_mx6sx() || is_mx7())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
	nand_info->use_minimum_ecc = true;
#endif

	if (mxs_nand_init_ctrl(nand_info) < 0)
		goto err;

	return;

err:
	free(nand_info);
}
#endif

#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8M)
/*
 * Read NAND layout for FCB block generation.
 */
void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	u32 tmp;

	tmp = readl(&bch_regs->hw_bch_flash0layout0);
	l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
			BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_META_SIZE_OFFSET;

	tmp = readl(&bch_regs->hw_bch_flash0layout1);
	l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
	l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
			BCH_FLASHLAYOUT0_ECC0_OFFSET;
	l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
			BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
	l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
			BCH_FLASHLAYOUT1_ECCN_OFFSET;
	l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
			BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
}
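
/*
 * Decode example (editor's sketch): immediately after mxs_nand_mode_fcb()
 * below has programmed the FCB layout, this function would report
 * nblocks = 7, meta_size = 32, ecc0 = eccn = 0x1F (i.e. ECC strength 62,
 * since the field holds strength / 2), data0_size = datan_size =
 * 4 * 0x20 = 128 bytes, and gf_len = 0 (GF(2^13)).
 */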

/*
 * Set BCH to the specific layout used by the ROM bootloader to read the FCB.
 */
void mxs_nand_mode_fcb(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 1;

	mtd->writesize = 1024;
	mtd->oobsize = 1862 - 1024;

	/* 8 ECC chunks */
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes for metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* ECC strength 62 (the field holds strength / 2, i.e. 0x1F) */
	tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* data0 block is 0x20 * 4 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* 1024 bytes of data + 838 bytes of OOB */
	tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* ECC strength 62 (the field holds strength / 2, i.e. 0x1F) */
	tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* each data block is 0x20 * 4 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}
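
/*
 * Sanity check (editor's arithmetic): the 1862-byte FCB page adds up from
 * the fields programmed above: 32 bytes of metadata + 8 chunks * 128 bytes
 * of data + 8 chunks * 62 * 13 bits of GF(2^13) BCH parity
 * = 32 + 1024 + 6448 bits (806 bytes) = 1862 bytes.
 */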

/*
 * Restore BCH to normal settings.
 */
void mxs_nand_mode_normal(struct mtd_info *mtd)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 0;

	mtd->writesize = nand_info->writesize;
	mtd->oobsize = nand_info->oobsize;

	writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
	writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
}

uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_byte_offset;
}

uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_bit_offset;
}
#endif /* CONFIG_MX6 || CONFIG_MX7 || CONFIG_IMX8M */