Commit bc468fa6a8dcfbe809eef7efe90150afbeca2fac

Authored by Peng Fan
Committed by Ye Li
1 parent b4a15f89ed

MLK-12693-1 nand: mxs: fix the bitflips for erased page when uncorrectable error

This patch is ported from Linux:
http://git.freescale.com/git/cgit.cgi/imx/linux-2.6-imx.git/commit/?h=imx_4.1.15_1.0.0_ga&id=3d42fcece496224fde59f9343763fb2dfc5b0768

"
We may meet bitflips when reading an erased page (one that contains all 0xFF);
this may cause UBIFS corruption. Please see the log from Elie:

-----------------------------------------------------------------
[    3.831323] UBI warning: ubi_io_read: error -74 (ECC error) while reading 16384 bytes from PEB 443:245760, read only 16384 bytes, retry
[    3.845026] UBI warning: ubi_io_read: error -74 (ECC error) while reading 16384 bytes from PEB 443:245760, read only 16384 bytes, retry
[    3.858710] UBI warning: ubi_io_read: error -74 (ECC error) while reading 16384 bytes from PEB 443:245760, read only 16384 bytes, retry
[    3.872408] UBI error: ubi_io_read: error -74 (ECC error) while reading 16384 bytes from PEB 443:245760, read 16384 bytes
...
[    4.011529] UBIFS error (pid 36): ubifs_recover_leb: corrupt empty space LEB 27:237568, corruption starts at 9815
[    4.021897] UBIFS error (pid 36): ubifs_scanned_corruption: corruption at LEB 27:247383
[    4.030000] UBIFS error (pid 36): ubifs_scanned_corruption: first 6569 bytes from LEB 27:247383
-----------------------------------------------------------------

This patch adds a check for this kind of uncorrectable failure, in the following steps:

   [0] set the threshold.
       The threshold is based on the fact that a single 0 bit will lead
       to gf_len (13 or 14) zero bits after the BCH has done the ECC
       encoding.

       To be safe, we set the threshold to half the gf_len, and do not
       let it exceed the ECC strength.

   [1] count the bitflips of the current ECC chunk; call it N.

   [2] if (N <= threshold) holds, we continue and read out the page
       again with ECC disabled, and count the bitflips once more; call
       it N2. (We read out the whole page, not just one chunk; this
       makes the check stricter and keeps the code simpler.)

   [3] if (N2 <= threshold) holds again, we can regard this as an
       erased page. This is because a truly erased page is full of 0xFF
       (possibly with several bitflips), while a programmed page that
       merely contains 0xFF data will definitely have many bitflips in
       the ECC parity areas.

   [4] if check [3] fails, we regard this as a programmed page filled
       with 0xFF data, i.e. a genuine uncorrectable error.
"

Signed-off-by: Peng Fan <peng.fan@nxp.com>
(cherry picked from commit ceb324a2914487aa517a6c70a06a20b5e3438fda)
(cherry picked from commit 026751697e41c7376414a8716cf0ea4bf998b85f)
(cherry picked from commit 93b481f07b8cb59c733f420bebea77ac484f9036)
(cherry picked from commit eefb30b8e68d522bd315ed884c36cb9b7e917f71)

Showing 1 changed file with 44 additions and 0 deletions

drivers/mtd/nand/raw/mxs_nand.c
1 // SPDX-License-Identifier: GPL-2.0+ 1 // SPDX-License-Identifier: GPL-2.0+
2 /* 2 /*
3 * Freescale i.MX28 NAND flash driver 3 * Freescale i.MX28 NAND flash driver
4 * 4 *
5 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com> 5 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
6 * on behalf of DENX Software Engineering GmbH 6 * on behalf of DENX Software Engineering GmbH
7 * 7 *
8 * Based on code from LTIB: 8 * Based on code from LTIB:
9 * Freescale GPMI NFC NAND Flash Driver 9 * Freescale GPMI NFC NAND Flash Driver
10 * 10 *
11 * Copyright (C) 2010 Freescale Semiconductor, Inc. 11 * Copyright (C) 2010 Freescale Semiconductor, Inc.
12 * Copyright (C) 2008 Embedded Alley Solutions, Inc. 12 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
13 */ 13 */
14 14
15 #include <common.h> 15 #include <common.h>
16 #include <cpu_func.h> 16 #include <cpu_func.h>
17 #include <dm.h> 17 #include <dm.h>
18 #include <linux/mtd/rawnand.h> 18 #include <linux/mtd/rawnand.h>
19 #include <linux/sizes.h> 19 #include <linux/sizes.h>
20 #include <linux/types.h> 20 #include <linux/types.h>
21 #include <malloc.h> 21 #include <malloc.h>
22 #include <linux/errno.h> 22 #include <linux/errno.h>
23 #include <asm/io.h> 23 #include <asm/io.h>
24 #include <asm/arch/clock.h> 24 #include <asm/arch/clock.h>
25 #include <asm/arch/imx-regs.h> 25 #include <asm/arch/imx-regs.h>
26 #include <asm/mach-imx/regs-bch.h> 26 #include <asm/mach-imx/regs-bch.h>
27 #include <asm/mach-imx/regs-gpmi.h> 27 #include <asm/mach-imx/regs-gpmi.h>
28 #include <asm/arch/sys_proto.h> 28 #include <asm/arch/sys_proto.h>
29 #include <mxs_nand.h> 29 #include <mxs_nand.h>
30 30
31 #define MXS_NAND_DMA_DESCRIPTOR_COUNT 4 31 #define MXS_NAND_DMA_DESCRIPTOR_COUNT 4
32 32
33 #if (defined(CONFIG_MX6) || defined(CONFIG_MX7)) 33 #if (defined(CONFIG_MX6) || defined(CONFIG_MX7))
34 #define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 2 34 #define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 2
35 #else 35 #else
36 #define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 0 36 #define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 0
37 #endif 37 #endif
38 #define MXS_NAND_METADATA_SIZE 10 38 #define MXS_NAND_METADATA_SIZE 10
39 #define MXS_NAND_BITS_PER_ECC_LEVEL 13 39 #define MXS_NAND_BITS_PER_ECC_LEVEL 13
40 40
41 #if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32 41 #if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
42 #define MXS_NAND_COMMAND_BUFFER_SIZE 32 42 #define MXS_NAND_COMMAND_BUFFER_SIZE 32
43 #else 43 #else
44 #define MXS_NAND_COMMAND_BUFFER_SIZE CONFIG_SYS_CACHELINE_SIZE 44 #define MXS_NAND_COMMAND_BUFFER_SIZE CONFIG_SYS_CACHELINE_SIZE
45 #endif 45 #endif
46 46
47 #define MXS_NAND_BCH_TIMEOUT 10000 47 #define MXS_NAND_BCH_TIMEOUT 10000
48 48
49 struct nand_ecclayout fake_ecc_layout; 49 struct nand_ecclayout fake_ecc_layout;
50 50
51 /* 51 /*
52 * Cache management functions 52 * Cache management functions
53 */ 53 */
54 #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) 54 #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
55 static void mxs_nand_flush_data_buf(struct mxs_nand_info *info) 55 static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
56 { 56 {
57 uint32_t addr = (uint32_t)info->data_buf; 57 uint32_t addr = (uint32_t)info->data_buf;
58 58
59 flush_dcache_range(addr, addr + info->data_buf_size); 59 flush_dcache_range(addr, addr + info->data_buf_size);
60 } 60 }
61 61
62 static void mxs_nand_inval_data_buf(struct mxs_nand_info *info) 62 static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
63 { 63 {
64 uint32_t addr = (uint32_t)info->data_buf; 64 uint32_t addr = (uint32_t)info->data_buf;
65 65
66 invalidate_dcache_range(addr, addr + info->data_buf_size); 66 invalidate_dcache_range(addr, addr + info->data_buf_size);
67 } 67 }
68 68
69 static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) 69 static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
70 { 70 {
71 uint32_t addr = (uint32_t)info->cmd_buf; 71 uint32_t addr = (uint32_t)info->cmd_buf;
72 72
73 flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE); 73 flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
74 } 74 }
75 #else 75 #else
76 static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {} 76 static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
77 static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {} 77 static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
78 static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {} 78 static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
79 #endif 79 #endif
80 80
81 static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info) 81 static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
82 { 82 {
83 struct mxs_dma_desc *desc; 83 struct mxs_dma_desc *desc;
84 84
85 if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) { 85 if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
86 printf("MXS NAND: Too many DMA descriptors requested\n"); 86 printf("MXS NAND: Too many DMA descriptors requested\n");
87 return NULL; 87 return NULL;
88 } 88 }
89 89
90 desc = info->desc[info->desc_index]; 90 desc = info->desc[info->desc_index];
91 info->desc_index++; 91 info->desc_index++;
92 92
93 return desc; 93 return desc;
94 } 94 }
95 95
96 static void mxs_nand_return_dma_descs(struct mxs_nand_info *info) 96 static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
97 { 97 {
98 int i; 98 int i;
99 struct mxs_dma_desc *desc; 99 struct mxs_dma_desc *desc;
100 100
101 for (i = 0; i < info->desc_index; i++) { 101 for (i = 0; i < info->desc_index; i++) {
102 desc = info->desc[i]; 102 desc = info->desc[i];
103 memset(desc, 0, sizeof(struct mxs_dma_desc)); 103 memset(desc, 0, sizeof(struct mxs_dma_desc));
104 desc->address = (dma_addr_t)desc; 104 desc->address = (dma_addr_t)desc;
105 } 105 }
106 106
107 info->desc_index = 0; 107 info->desc_index = 0;
108 } 108 }
109 109
110 static uint32_t mxs_nand_aux_status_offset(void) 110 static uint32_t mxs_nand_aux_status_offset(void)
111 { 111 {
112 return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3; 112 return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
113 } 113 }
114 114
115 static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo, struct mtd_info *mtd, 115 static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo, struct mtd_info *mtd,
116 unsigned int *chunk_num) 116 unsigned int *chunk_num)
117 { 117 {
118 unsigned int i, j; 118 unsigned int i, j;
119 119
120 if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) { 120 if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
121 dev_err(this->dev, "The size of chunk0 must equal to chunkn\n"); 121 dev_err(this->dev, "The size of chunk0 must equal to chunkn\n");
122 return false; 122 return false;
123 } 123 }
124 124
125 i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) / 125 i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
126 (geo->gf_len * geo->ecc_strength + 126 (geo->gf_len * geo->ecc_strength +
127 geo->ecc_chunkn_size * 8); 127 geo->ecc_chunkn_size * 8);
128 128
129 j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) - 129 j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
130 (geo->gf_len * geo->ecc_strength + 130 (geo->gf_len * geo->ecc_strength +
131 geo->ecc_chunkn_size * 8) * i; 131 geo->ecc_chunkn_size * 8) * i;
132 132
133 if (j < geo->ecc_chunkn_size * 8) { 133 if (j < geo->ecc_chunkn_size * 8) {
134 *chunk_num = i+1; 134 *chunk_num = i+1;
135 dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n", 135 dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n",
136 geo->ecc_strength, *chunk_num); 136 geo->ecc_strength, *chunk_num);
137 return true; 137 return true;
138 } 138 }
139 139
140 return false; 140 return false;
141 } 141 }
142 142
143 static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo, 143 static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
144 struct mtd_info *mtd, 144 struct mtd_info *mtd,
145 unsigned int ecc_strength, 145 unsigned int ecc_strength,
146 unsigned int ecc_step) 146 unsigned int ecc_step)
147 { 147 {
148 struct nand_chip *chip = mtd_to_nand(mtd); 148 struct nand_chip *chip = mtd_to_nand(mtd);
149 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 149 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
150 unsigned int block_mark_bit_offset; 150 unsigned int block_mark_bit_offset;
151 151
152 switch (ecc_step) { 152 switch (ecc_step) {
153 case SZ_512: 153 case SZ_512:
154 geo->gf_len = 13; 154 geo->gf_len = 13;
155 break; 155 break;
156 case SZ_1K: 156 case SZ_1K:
157 geo->gf_len = 14; 157 geo->gf_len = 14;
158 break; 158 break;
159 default: 159 default:
160 return -EINVAL; 160 return -EINVAL;
161 } 161 }
162 162
163 geo->ecc_chunk0_size = ecc_step; 163 geo->ecc_chunk0_size = ecc_step;
164 geo->ecc_chunkn_size = ecc_step; 164 geo->ecc_chunkn_size = ecc_step;
165 geo->ecc_strength = round_up(ecc_strength, 2); 165 geo->ecc_strength = round_up(ecc_strength, 2);
166 166
167 /* Keep the C >= O */ 167 /* Keep the C >= O */
168 if (geo->ecc_chunkn_size < mtd->oobsize) 168 if (geo->ecc_chunkn_size < mtd->oobsize)
169 return -EINVAL; 169 return -EINVAL;
170 170
171 if (geo->ecc_strength > nand_info->max_ecc_strength_supported) 171 if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
172 return -EINVAL; 172 return -EINVAL;
173 173
174 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size; 174 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
175 175
176 /* For bit swap. */ 176 /* For bit swap. */
177 block_mark_bit_offset = mtd->writesize * 8 - 177 block_mark_bit_offset = mtd->writesize * 8 -
178 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1) 178 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
179 + MXS_NAND_METADATA_SIZE * 8); 179 + MXS_NAND_METADATA_SIZE * 8);
180 180
181 geo->block_mark_byte_offset = block_mark_bit_offset / 8; 181 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
182 geo->block_mark_bit_offset = block_mark_bit_offset % 8; 182 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
183 183
184 return 0; 184 return 0;
185 } 185 }
186 186
187 static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo, 187 static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
188 struct mtd_info *mtd) 188 struct mtd_info *mtd)
189 { 189 {
190 struct nand_chip *chip = mtd_to_nand(mtd); 190 struct nand_chip *chip = mtd_to_nand(mtd);
191 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 191 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
192 unsigned int block_mark_bit_offset; 192 unsigned int block_mark_bit_offset;
193 193
194 /* The default for the length of Galois Field. */ 194 /* The default for the length of Galois Field. */
195 geo->gf_len = 13; 195 geo->gf_len = 13;
196 196
197 /* The default for chunk size. */ 197 /* The default for chunk size. */
198 geo->ecc_chunk0_size = 512; 198 geo->ecc_chunk0_size = 512;
199 geo->ecc_chunkn_size = 512; 199 geo->ecc_chunkn_size = 512;
200 200
201 if (geo->ecc_chunkn_size < mtd->oobsize) { 201 if (geo->ecc_chunkn_size < mtd->oobsize) {
202 geo->gf_len = 14; 202 geo->gf_len = 14;
203 geo->ecc_chunk0_size *= 2; 203 geo->ecc_chunk0_size *= 2;
204 geo->ecc_chunkn_size *= 2; 204 geo->ecc_chunkn_size *= 2;
205 } 205 }
206 206
207 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size; 207 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
208 208
209 /* 209 /*
210 * Determine the ECC layout with the formula: 210 * Determine the ECC layout with the formula:
211 * ECC bits per chunk = (total page spare data bits) / 211 * ECC bits per chunk = (total page spare data bits) /
212 * (bits per ECC level) / (chunks per page) 212 * (bits per ECC level) / (chunks per page)
213 * where: 213 * where:
214 * total page spare data bits = 214 * total page spare data bits =
215 * (page oob size - meta data size) * (bits per byte) 215 * (page oob size - meta data size) * (bits per byte)
216 */ 216 */
217 geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8) 217 geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
218 / (geo->gf_len * geo->ecc_chunk_count); 218 / (geo->gf_len * geo->ecc_chunk_count);
219 219
220 geo->ecc_strength = min(round_down(geo->ecc_strength, 2), 220 geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
221 nand_info->max_ecc_strength_supported); 221 nand_info->max_ecc_strength_supported);
222 222
223 block_mark_bit_offset = mtd->writesize * 8 - 223 block_mark_bit_offset = mtd->writesize * 8 -
224 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1) 224 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
225 + MXS_NAND_METADATA_SIZE * 8); 225 + MXS_NAND_METADATA_SIZE * 8);
226 226
227 geo->block_mark_byte_offset = block_mark_bit_offset / 8; 227 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
228 geo->block_mark_bit_offset = block_mark_bit_offset % 8; 228 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
229 229
230 return 0; 230 return 0;
231 } 231 }
232 232
233 static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo, 233 static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
234 struct mtd_info *mtd) 234 struct mtd_info *mtd)
235 { 235 {
236 struct nand_chip *chip = mtd_to_nand(mtd); 236 struct nand_chip *chip = mtd_to_nand(mtd);
237 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 237 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
238 unsigned int block_mark_bit_offset; 238 unsigned int block_mark_bit_offset;
239 unsigned int max_ecc; 239 unsigned int max_ecc;
240 unsigned int bbm_chunk; 240 unsigned int bbm_chunk;
241 unsigned int i; 241 unsigned int i;
242 242
243 /* sanity check for the minimum ecc nand required */ 243 /* sanity check for the minimum ecc nand required */
244 if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) 244 if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
245 return -EINVAL; 245 return -EINVAL;
246 geo->ecc_strength = chip->ecc_strength_ds; 246 geo->ecc_strength = chip->ecc_strength_ds;
247 247
248 /* calculate the maximum ecc platform can support*/ 248 /* calculate the maximum ecc platform can support*/
249 geo->gf_len = 14; 249 geo->gf_len = 14;
250 geo->ecc_chunk0_size = 1024; 250 geo->ecc_chunk0_size = 1024;
251 geo->ecc_chunkn_size = 1024; 251 geo->ecc_chunkn_size = 1024;
252 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size; 252 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
253 max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8) 253 max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
254 / (geo->gf_len * geo->ecc_chunk_count); 254 / (geo->gf_len * geo->ecc_chunk_count);
255 max_ecc = min(round_down(max_ecc, 2), 255 max_ecc = min(round_down(max_ecc, 2),
256 nand_info->max_ecc_strength_supported); 256 nand_info->max_ecc_strength_supported);
257 257
258 258
259 /* search a supported ecc strength that makes bbm */ 259 /* search a supported ecc strength that makes bbm */
260 /* located in data chunk */ 260 /* located in data chunk */
261 geo->ecc_strength = chip->ecc_strength_ds; 261 geo->ecc_strength = chip->ecc_strength_ds;
262 while (!(geo->ecc_strength > max_ecc)) { 262 while (!(geo->ecc_strength > max_ecc)) {
263 if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk)) 263 if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
264 break; 264 break;
265 geo->ecc_strength += 2; 265 geo->ecc_strength += 2;
266 } 266 }
267 267
268 /* if none of them works, keep using the minimum ecc */ 268 /* if none of them works, keep using the minimum ecc */
269 /* nand required but changing ecc page layout */ 269 /* nand required but changing ecc page layout */
270 if (geo->ecc_strength > max_ecc) { 270 if (geo->ecc_strength > max_ecc) {
271 geo->ecc_strength = chip->ecc_strength_ds; 271 geo->ecc_strength = chip->ecc_strength_ds;
272 /* add extra ecc for meta data */ 272 /* add extra ecc for meta data */
273 geo->ecc_chunk0_size = 0; 273 geo->ecc_chunk0_size = 0;
274 geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1; 274 geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
275 geo->ecc_for_meta = 1; 275 geo->ecc_for_meta = 1;
276 /* check if oob can afford this extra ecc chunk */ 276 /* check if oob can afford this extra ecc chunk */
277 if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 + 277 if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
278 geo->gf_len * geo->ecc_strength 278 geo->gf_len * geo->ecc_strength
279 * geo->ecc_chunk_count) { 279 * geo->ecc_chunk_count) {
280 printf("unsupported NAND chip with new layout\n"); 280 printf("unsupported NAND chip with new layout\n");
281 return -EINVAL; 281 return -EINVAL;
282 } 282 }
283 283
284 /* calculate in which chunk bbm located */ 284 /* calculate in which chunk bbm located */
285 bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 - 285 bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
286 geo->gf_len * geo->ecc_strength) / 286 geo->gf_len * geo->ecc_strength) /
287 (geo->gf_len * geo->ecc_strength + 287 (geo->gf_len * geo->ecc_strength +
288 geo->ecc_chunkn_size * 8) + 1; 288 geo->ecc_chunkn_size * 8) + 1;
289 } 289 }
290 290
291 /* calculate the number of ecc chunk behind the bbm */ 291 /* calculate the number of ecc chunk behind the bbm */
292 i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1; 292 i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;
293 293
294 block_mark_bit_offset = mtd->writesize * 8 - 294 block_mark_bit_offset = mtd->writesize * 8 -
295 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i) 295 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
296 + MXS_NAND_METADATA_SIZE * 8); 296 + MXS_NAND_METADATA_SIZE * 8);
297 297
298 geo->block_mark_byte_offset = block_mark_bit_offset / 8; 298 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
299 geo->block_mark_bit_offset = block_mark_bit_offset % 8; 299 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
300 300
301 return 0; 301 return 0;
302 } 302 }
303 303
304 /* 304 /*
305 * Wait for BCH complete IRQ and clear the IRQ 305 * Wait for BCH complete IRQ and clear the IRQ
306 */ 306 */
307 static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info) 307 static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
308 { 308 {
309 int timeout = MXS_NAND_BCH_TIMEOUT; 309 int timeout = MXS_NAND_BCH_TIMEOUT;
310 int ret; 310 int ret;
311 311
312 ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg, 312 ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
313 BCH_CTRL_COMPLETE_IRQ, timeout); 313 BCH_CTRL_COMPLETE_IRQ, timeout);
314 314
315 writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr); 315 writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);
316 316
317 return ret; 317 return ret;
318 } 318 }
319 319
320 /* 320 /*
321 * This is the function that we install in the cmd_ctrl function pointer of the 321 * This is the function that we install in the cmd_ctrl function pointer of the
322 * owning struct nand_chip. The only functions in the reference implementation 322 * owning struct nand_chip. The only functions in the reference implementation
323 * that use these functions pointers are cmdfunc and select_chip. 323 * that use these functions pointers are cmdfunc and select_chip.
324 * 324 *
325 * In this driver, we implement our own select_chip, so this function will only 325 * In this driver, we implement our own select_chip, so this function will only
326 * be called by the reference implementation's cmdfunc. For this reason, we can 326 * be called by the reference implementation's cmdfunc. For this reason, we can
327 * ignore the chip enable bit and concentrate only on sending bytes to the NAND 327 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
328 * Flash. 328 * Flash.
329 */ 329 */
330 static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl) 330 static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
331 { 331 {
332 struct nand_chip *nand = mtd_to_nand(mtd); 332 struct nand_chip *nand = mtd_to_nand(mtd);
333 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 333 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
334 struct mxs_dma_desc *d; 334 struct mxs_dma_desc *d;
335 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 335 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
336 int ret; 336 int ret;
337 337
338 /* 338 /*
339 * If this condition is true, something is _VERY_ wrong in MTD 339 * If this condition is true, something is _VERY_ wrong in MTD
340 * subsystem! 340 * subsystem!
341 */ 341 */
342 if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) { 342 if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
343 printf("MXS NAND: Command queue too long\n"); 343 printf("MXS NAND: Command queue too long\n");
344 return; 344 return;
345 } 345 }
346 346
347 /* 347 /*
348 * Every operation begins with a command byte and a series of zero or 348 * Every operation begins with a command byte and a series of zero or
349 * more address bytes. These are distinguished by either the Address 349 * more address bytes. These are distinguished by either the Address
350 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being 350 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
351 * asserted. When MTD is ready to execute the command, it will 351 * asserted. When MTD is ready to execute the command, it will
352 * deasert both latch enables. 352 * deasert both latch enables.
353 * 353 *
354 * Rather than run a separate DMA operation for every single byte, we 354 * Rather than run a separate DMA operation for every single byte, we
355 * queue them up and run a single DMA operation for the entire series 355 * queue them up and run a single DMA operation for the entire series
356 * of command and data bytes. 356 * of command and data bytes.
357 */ 357 */
358 if (ctrl & (NAND_ALE | NAND_CLE)) { 358 if (ctrl & (NAND_ALE | NAND_CLE)) {
359 if (data != NAND_CMD_NONE) 359 if (data != NAND_CMD_NONE)
360 nand_info->cmd_buf[nand_info->cmd_queue_len++] = data; 360 nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
361 return; 361 return;
362 } 362 }
363 363
364 /* 364 /*
365 * If control arrives here, MTD has deasserted both the ALE and CLE, 365 * If control arrives here, MTD has deasserted both the ALE and CLE,
366 * which means it's ready to run an operation. Check if we have any 366 * which means it's ready to run an operation. Check if we have any
367 * bytes to send. 367 * bytes to send.
368 */ 368 */
369 if (nand_info->cmd_queue_len == 0) 369 if (nand_info->cmd_queue_len == 0)
370 return; 370 return;
371 371
372 /* Compile the DMA descriptor -- a descriptor that sends command. */ 372 /* Compile the DMA descriptor -- a descriptor that sends command. */
373 d = mxs_nand_get_dma_desc(nand_info); 373 d = mxs_nand_get_dma_desc(nand_info);
374 d->cmd.data = 374 d->cmd.data =
375 MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ | 375 MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
376 MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM | 376 MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
377 MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | 377 MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
378 (nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET); 378 (nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);
379 379
380 d->cmd.address = (dma_addr_t)nand_info->cmd_buf; 380 d->cmd.address = (dma_addr_t)nand_info->cmd_buf;
381 381
382 d->cmd.pio_words[0] = 382 d->cmd.pio_words[0] =
383 GPMI_CTRL0_COMMAND_MODE_WRITE | 383 GPMI_CTRL0_COMMAND_MODE_WRITE |
384 GPMI_CTRL0_WORD_LENGTH | 384 GPMI_CTRL0_WORD_LENGTH |
385 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 385 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
386 GPMI_CTRL0_ADDRESS_NAND_CLE | 386 GPMI_CTRL0_ADDRESS_NAND_CLE |
387 GPMI_CTRL0_ADDRESS_INCREMENT | 387 GPMI_CTRL0_ADDRESS_INCREMENT |
388 nand_info->cmd_queue_len; 388 nand_info->cmd_queue_len;
389 389
390 mxs_dma_desc_append(channel, d); 390 mxs_dma_desc_append(channel, d);
391 391
392 /* Flush caches */ 392 /* Flush caches */
393 mxs_nand_flush_cmd_buf(nand_info); 393 mxs_nand_flush_cmd_buf(nand_info);
394 394
395 /* Execute the DMA chain. */ 395 /* Execute the DMA chain. */
396 ret = mxs_dma_go(channel); 396 ret = mxs_dma_go(channel);
397 if (ret) 397 if (ret)
398 printf("MXS NAND: Error sending command\n"); 398 printf("MXS NAND: Error sending command\n");
399 399
400 mxs_nand_return_dma_descs(nand_info); 400 mxs_nand_return_dma_descs(nand_info);
401 401
402 /* Reset the command queue. */ 402 /* Reset the command queue. */
403 nand_info->cmd_queue_len = 0; 403 nand_info->cmd_queue_len = 0;
404 } 404 }
405 405
406 /* 406 /*
407 * Test if the NAND flash is ready. 407 * Test if the NAND flash is ready.
408 */ 408 */
409 static int mxs_nand_device_ready(struct mtd_info *mtd) 409 static int mxs_nand_device_ready(struct mtd_info *mtd)
410 { 410 {
411 struct nand_chip *chip = mtd_to_nand(mtd); 411 struct nand_chip *chip = mtd_to_nand(mtd);
412 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 412 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
413 uint32_t tmp; 413 uint32_t tmp;
414 414
415 tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat); 415 tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
416 tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip); 416 tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);
417 417
418 return tmp & 1; 418 return tmp & 1;
419 } 419 }
420 420
421 /* 421 /*
422 * Select the NAND chip. 422 * Select the NAND chip.
423 */ 423 */
424 static void mxs_nand_select_chip(struct mtd_info *mtd, int chip) 424 static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
425 { 425 {
426 struct nand_chip *nand = mtd_to_nand(mtd); 426 struct nand_chip *nand = mtd_to_nand(mtd);
427 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 427 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
428 428
429 nand_info->cur_chip = chip; 429 nand_info->cur_chip = chip;
430 } 430 }
431 431
432 /* 432 /*
433 * Handle block mark swapping. 433 * Handle block mark swapping.
434 * 434 *
435 * Note that, when this function is called, it doesn't know whether it's 435 * Note that, when this function is called, it doesn't know whether it's
436 * swapping the block mark, or swapping it *back* -- but it doesn't matter 436 * swapping the block mark, or swapping it *back* -- but it doesn't matter
437 * because the the operation is the same. 437 * because the the operation is the same.
438 */ 438 */
439 static void mxs_nand_swap_block_mark(struct bch_geometry *geo, 439 static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
440 uint8_t *data_buf, uint8_t *oob_buf) 440 uint8_t *data_buf, uint8_t *oob_buf)
441 { 441 {
442 uint32_t bit_offset = geo->block_mark_bit_offset; 442 uint32_t bit_offset = geo->block_mark_bit_offset;
443 uint32_t buf_offset = geo->block_mark_byte_offset; 443 uint32_t buf_offset = geo->block_mark_byte_offset;
444 444
445 uint32_t src; 445 uint32_t src;
446 uint32_t dst; 446 uint32_t dst;
447 447
448 /* 448 /*
449 * Get the byte from the data area that overlays the block mark. Since 449 * Get the byte from the data area that overlays the block mark. Since
450 * the ECC engine applies its own view to the bits in the page, the 450 * the ECC engine applies its own view to the bits in the page, the
451 * physical block mark won't (in general) appear on a byte boundary in 451 * physical block mark won't (in general) appear on a byte boundary in
452 * the data. 452 * the data.
453 */ 453 */
454 src = data_buf[buf_offset] >> bit_offset; 454 src = data_buf[buf_offset] >> bit_offset;
455 src |= data_buf[buf_offset + 1] << (8 - bit_offset); 455 src |= data_buf[buf_offset + 1] << (8 - bit_offset);
456 456
457 dst = oob_buf[0]; 457 dst = oob_buf[0];
458 458
459 oob_buf[0] = src; 459 oob_buf[0] = src;
460 460
461 data_buf[buf_offset] &= ~(0xff << bit_offset); 461 data_buf[buf_offset] &= ~(0xff << bit_offset);
462 data_buf[buf_offset + 1] &= 0xff << bit_offset; 462 data_buf[buf_offset + 1] &= 0xff << bit_offset;
463 463
464 data_buf[buf_offset] |= dst << bit_offset; 464 data_buf[buf_offset] |= dst << bit_offset;
465 data_buf[buf_offset + 1] |= dst >> (8 - bit_offset); 465 data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
466 } 466 }
467 467
468 /* 468 /*
469 * Read data from NAND. 469 * Read data from NAND.
470 */ 470 */
471 static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length) 471 static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
472 { 472 {
473 struct nand_chip *nand = mtd_to_nand(mtd); 473 struct nand_chip *nand = mtd_to_nand(mtd);
474 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 474 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
475 struct mxs_dma_desc *d; 475 struct mxs_dma_desc *d;
476 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 476 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
477 int ret; 477 int ret;
478 478
479 if (length > NAND_MAX_PAGESIZE) { 479 if (length > NAND_MAX_PAGESIZE) {
480 printf("MXS NAND: DMA buffer too big\n"); 480 printf("MXS NAND: DMA buffer too big\n");
481 return; 481 return;
482 } 482 }
483 483
484 if (!buf) { 484 if (!buf) {
485 printf("MXS NAND: DMA buffer is NULL\n"); 485 printf("MXS NAND: DMA buffer is NULL\n");
486 return; 486 return;
487 } 487 }
488 488
489 /* Compile the DMA descriptor - a descriptor that reads data. */ 489 /* Compile the DMA descriptor - a descriptor that reads data. */
490 d = mxs_nand_get_dma_desc(nand_info); 490 d = mxs_nand_get_dma_desc(nand_info);
491 d->cmd.data = 491 d->cmd.data =
492 MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ | 492 MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
493 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | 493 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
494 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | 494 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
495 (length << MXS_DMA_DESC_BYTES_OFFSET); 495 (length << MXS_DMA_DESC_BYTES_OFFSET);
496 496
497 d->cmd.address = (dma_addr_t)nand_info->data_buf; 497 d->cmd.address = (dma_addr_t)nand_info->data_buf;
498 498
499 d->cmd.pio_words[0] = 499 d->cmd.pio_words[0] =
500 GPMI_CTRL0_COMMAND_MODE_READ | 500 GPMI_CTRL0_COMMAND_MODE_READ |
501 GPMI_CTRL0_WORD_LENGTH | 501 GPMI_CTRL0_WORD_LENGTH |
502 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 502 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
503 GPMI_CTRL0_ADDRESS_NAND_DATA | 503 GPMI_CTRL0_ADDRESS_NAND_DATA |
504 length; 504 length;
505 505
506 mxs_dma_desc_append(channel, d); 506 mxs_dma_desc_append(channel, d);
507 507
508 /* 508 /*
509 * A DMA descriptor that waits for the command to end and the chip to 509 * A DMA descriptor that waits for the command to end and the chip to
510 * become ready. 510 * become ready.
511 * 511 *
512 * I think we actually should *not* be waiting for the chip to become 512 * I think we actually should *not* be waiting for the chip to become
513 * ready because, after all, we don't care. I think the original code 513 * ready because, after all, we don't care. I think the original code
514 * did that and no one has re-thought it yet. 514 * did that and no one has re-thought it yet.
515 */ 515 */
516 d = mxs_nand_get_dma_desc(nand_info); 516 d = mxs_nand_get_dma_desc(nand_info);
517 d->cmd.data = 517 d->cmd.data =
518 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | 518 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
519 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM | 519 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
520 MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 520 MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
521 521
522 d->cmd.address = 0; 522 d->cmd.address = 0;
523 523
524 d->cmd.pio_words[0] = 524 d->cmd.pio_words[0] =
525 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | 525 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
526 GPMI_CTRL0_WORD_LENGTH | 526 GPMI_CTRL0_WORD_LENGTH |
527 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 527 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
528 GPMI_CTRL0_ADDRESS_NAND_DATA; 528 GPMI_CTRL0_ADDRESS_NAND_DATA;
529 529
530 mxs_dma_desc_append(channel, d); 530 mxs_dma_desc_append(channel, d);
531 531
532 /* Invalidate caches */ 532 /* Invalidate caches */
533 mxs_nand_inval_data_buf(nand_info); 533 mxs_nand_inval_data_buf(nand_info);
534 534
535 /* Execute the DMA chain. */ 535 /* Execute the DMA chain. */
536 ret = mxs_dma_go(channel); 536 ret = mxs_dma_go(channel);
537 if (ret) { 537 if (ret) {
538 printf("MXS NAND: DMA read error\n"); 538 printf("MXS NAND: DMA read error\n");
539 goto rtn; 539 goto rtn;
540 } 540 }
541 541
542 /* Invalidate caches */ 542 /* Invalidate caches */
543 mxs_nand_inval_data_buf(nand_info); 543 mxs_nand_inval_data_buf(nand_info);
544 544
545 memcpy(buf, nand_info->data_buf, length); 545 memcpy(buf, nand_info->data_buf, length);
546 546
547 rtn: 547 rtn:
548 mxs_nand_return_dma_descs(nand_info); 548 mxs_nand_return_dma_descs(nand_info);
549 } 549 }
550 550
551 /* 551 /*
552 * Write data to NAND. 552 * Write data to NAND.
553 */ 553 */
554 static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, 554 static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
555 int length) 555 int length)
556 { 556 {
557 struct nand_chip *nand = mtd_to_nand(mtd); 557 struct nand_chip *nand = mtd_to_nand(mtd);
558 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 558 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
559 struct mxs_dma_desc *d; 559 struct mxs_dma_desc *d;
560 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 560 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
561 int ret; 561 int ret;
562 562
563 if (length > NAND_MAX_PAGESIZE) { 563 if (length > NAND_MAX_PAGESIZE) {
564 printf("MXS NAND: DMA buffer too big\n"); 564 printf("MXS NAND: DMA buffer too big\n");
565 return; 565 return;
566 } 566 }
567 567
568 if (!buf) { 568 if (!buf) {
569 printf("MXS NAND: DMA buffer is NULL\n"); 569 printf("MXS NAND: DMA buffer is NULL\n");
570 return; 570 return;
571 } 571 }
572 572
573 memcpy(nand_info->data_buf, buf, length); 573 memcpy(nand_info->data_buf, buf, length);
574 574
575 /* Compile the DMA descriptor - a descriptor that writes data. */ 575 /* Compile the DMA descriptor - a descriptor that writes data. */
576 d = mxs_nand_get_dma_desc(nand_info); 576 d = mxs_nand_get_dma_desc(nand_info);
577 d->cmd.data = 577 d->cmd.data =
578 MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ | 578 MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
579 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | 579 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
580 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | 580 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
581 (length << MXS_DMA_DESC_BYTES_OFFSET); 581 (length << MXS_DMA_DESC_BYTES_OFFSET);
582 582
583 d->cmd.address = (dma_addr_t)nand_info->data_buf; 583 d->cmd.address = (dma_addr_t)nand_info->data_buf;
584 584
585 d->cmd.pio_words[0] = 585 d->cmd.pio_words[0] =
586 GPMI_CTRL0_COMMAND_MODE_WRITE | 586 GPMI_CTRL0_COMMAND_MODE_WRITE |
587 GPMI_CTRL0_WORD_LENGTH | 587 GPMI_CTRL0_WORD_LENGTH |
588 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 588 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
589 GPMI_CTRL0_ADDRESS_NAND_DATA | 589 GPMI_CTRL0_ADDRESS_NAND_DATA |
590 length; 590 length;
591 591
592 mxs_dma_desc_append(channel, d); 592 mxs_dma_desc_append(channel, d);
593 593
594 /* Flush caches */ 594 /* Flush caches */
595 mxs_nand_flush_data_buf(nand_info); 595 mxs_nand_flush_data_buf(nand_info);
596 596
597 /* Execute the DMA chain. */ 597 /* Execute the DMA chain. */
598 ret = mxs_dma_go(channel); 598 ret = mxs_dma_go(channel);
599 if (ret) 599 if (ret)
600 printf("MXS NAND: DMA write error\n"); 600 printf("MXS NAND: DMA write error\n");
601 601
602 mxs_nand_return_dma_descs(nand_info); 602 mxs_nand_return_dma_descs(nand_info);
603 } 603 }
604 604
605 /* 605 /*
606 * Read a single byte from NAND. 606 * Read a single byte from NAND.
607 */ 607 */
608 static uint8_t mxs_nand_read_byte(struct mtd_info *mtd) 608 static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
609 { 609 {
610 uint8_t buf; 610 uint8_t buf;
611 mxs_nand_read_buf(mtd, &buf, 1); 611 mxs_nand_read_buf(mtd, &buf, 1);
612 return buf; 612 return buf;
613 } 613 }
614 614
615 static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
616 uint8_t *buf, int chunk, int page)
617 {
618 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
619 struct bch_geometry *geo = &nand_info->bch_geometry;
620 unsigned int flip_bits = 0, flip_bits_noecc = 0;
621 unsigned int threshold;
622 unsigned int base = geo->ecc_chunkn_size * chunk;
623 uint32_t *dma_buf = (uint32_t *)buf;
624 int i;
625
626 threshold = geo->gf_len / 2;
627 if (threshold > geo->ecc_strength)
628 threshold = geo->ecc_strength;
629
630 for (i = 0; i < geo->ecc_chunkn_size; i++) {
631 flip_bits += hweight8(~buf[base + i]);
632 if (flip_bits > threshold)
633 return false;
634 }
635
636 nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
637 nand->read_buf(mtd, buf, mtd->writesize);
638
639 for (i = 0; i < mtd->writesize / 4; i++) {
640 flip_bits_noecc += hweight32(~dma_buf[i]);
641 if (flip_bits_noecc > threshold)
642 return false;
643 }
644
645 mtd->ecc_stats.corrected += flip_bits;
646
647 memset(buf, 0xff, mtd->writesize);
648
649 printf("The page(%d) is an erased page(%d,%d,%d,%d).\n", page, chunk, threshold, flip_bits, flip_bits_noecc);
650
651 return true;
652 }
653
615 /* 654 /*
616 * Read a page from NAND. 655 * Read a page from NAND.
617 */ 656 */
618 static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand, 657 static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
619 uint8_t *buf, int oob_required, 658 uint8_t *buf, int oob_required,
620 int page) 659 int page)
621 { 660 {
622 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 661 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
623 struct bch_geometry *geo = &nand_info->bch_geometry; 662 struct bch_geometry *geo = &nand_info->bch_geometry;
624 struct mxs_dma_desc *d; 663 struct mxs_dma_desc *d;
625 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 664 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
626 uint32_t corrected = 0, failed = 0; 665 uint32_t corrected = 0, failed = 0;
627 uint8_t *status; 666 uint8_t *status;
628 int i, ret; 667 int i, ret;
629 668
630 /* Compile the DMA descriptor - wait for ready. */ 669 /* Compile the DMA descriptor - wait for ready. */
631 d = mxs_nand_get_dma_desc(nand_info); 670 d = mxs_nand_get_dma_desc(nand_info);
632 d->cmd.data = 671 d->cmd.data =
633 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | 672 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
634 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END | 673 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
635 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 674 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
636 675
637 d->cmd.address = 0; 676 d->cmd.address = 0;
638 677
639 d->cmd.pio_words[0] = 678 d->cmd.pio_words[0] =
640 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | 679 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
641 GPMI_CTRL0_WORD_LENGTH | 680 GPMI_CTRL0_WORD_LENGTH |
642 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 681 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
643 GPMI_CTRL0_ADDRESS_NAND_DATA; 682 GPMI_CTRL0_ADDRESS_NAND_DATA;
644 683
645 mxs_dma_desc_append(channel, d); 684 mxs_dma_desc_append(channel, d);
646 685
647 /* Compile the DMA descriptor - enable the BCH block and read. */ 686 /* Compile the DMA descriptor - enable the BCH block and read. */
648 d = mxs_nand_get_dma_desc(nand_info); 687 d = mxs_nand_get_dma_desc(nand_info);
649 d->cmd.data = 688 d->cmd.data =
650 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | 689 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
651 MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 690 MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
652 691
653 d->cmd.address = 0; 692 d->cmd.address = 0;
654 693
655 d->cmd.pio_words[0] = 694 d->cmd.pio_words[0] =
656 GPMI_CTRL0_COMMAND_MODE_READ | 695 GPMI_CTRL0_COMMAND_MODE_READ |
657 GPMI_CTRL0_WORD_LENGTH | 696 GPMI_CTRL0_WORD_LENGTH |
658 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 697 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
659 GPMI_CTRL0_ADDRESS_NAND_DATA | 698 GPMI_CTRL0_ADDRESS_NAND_DATA |
660 (mtd->writesize + mtd->oobsize); 699 (mtd->writesize + mtd->oobsize);
661 d->cmd.pio_words[1] = 0; 700 d->cmd.pio_words[1] = 0;
662 d->cmd.pio_words[2] = 701 d->cmd.pio_words[2] =
663 GPMI_ECCCTRL_ENABLE_ECC | 702 GPMI_ECCCTRL_ENABLE_ECC |
664 GPMI_ECCCTRL_ECC_CMD_DECODE | 703 GPMI_ECCCTRL_ECC_CMD_DECODE |
665 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE; 704 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
666 d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize; 705 d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
667 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf; 706 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
668 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf; 707 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;
669 708
670 mxs_dma_desc_append(channel, d); 709 mxs_dma_desc_append(channel, d);
671 710
672 /* Compile the DMA descriptor - disable the BCH block. */ 711 /* Compile the DMA descriptor - disable the BCH block. */
673 d = mxs_nand_get_dma_desc(nand_info); 712 d = mxs_nand_get_dma_desc(nand_info);
674 d->cmd.data = 713 d->cmd.data =
675 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | 714 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
676 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END | 715 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
677 (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 716 (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
678 717
679 d->cmd.address = 0; 718 d->cmd.address = 0;
680 719
681 d->cmd.pio_words[0] = 720 d->cmd.pio_words[0] =
682 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | 721 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
683 GPMI_CTRL0_WORD_LENGTH | 722 GPMI_CTRL0_WORD_LENGTH |
684 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 723 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
685 GPMI_CTRL0_ADDRESS_NAND_DATA | 724 GPMI_CTRL0_ADDRESS_NAND_DATA |
686 (mtd->writesize + mtd->oobsize); 725 (mtd->writesize + mtd->oobsize);
687 d->cmd.pio_words[1] = 0; 726 d->cmd.pio_words[1] = 0;
688 d->cmd.pio_words[2] = 0; 727 d->cmd.pio_words[2] = 0;
689 728
690 mxs_dma_desc_append(channel, d); 729 mxs_dma_desc_append(channel, d);
691 730
692 /* Compile the DMA descriptor - deassert the NAND lock and interrupt. */ 731 /* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
693 d = mxs_nand_get_dma_desc(nand_info); 732 d = mxs_nand_get_dma_desc(nand_info);
694 d->cmd.data = 733 d->cmd.data =
695 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | 734 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
696 MXS_DMA_DESC_DEC_SEM; 735 MXS_DMA_DESC_DEC_SEM;
697 736
698 d->cmd.address = 0; 737 d->cmd.address = 0;
699 738
700 mxs_dma_desc_append(channel, d); 739 mxs_dma_desc_append(channel, d);
701 740
702 /* Invalidate caches */ 741 /* Invalidate caches */
703 mxs_nand_inval_data_buf(nand_info); 742 mxs_nand_inval_data_buf(nand_info);
704 743
705 /* Execute the DMA chain. */ 744 /* Execute the DMA chain. */
706 ret = mxs_dma_go(channel); 745 ret = mxs_dma_go(channel);
707 if (ret) { 746 if (ret) {
708 printf("MXS NAND: DMA read error\n"); 747 printf("MXS NAND: DMA read error\n");
709 goto rtn; 748 goto rtn;
710 } 749 }
711 750
712 ret = mxs_nand_wait_for_bch_complete(nand_info); 751 ret = mxs_nand_wait_for_bch_complete(nand_info);
713 if (ret) { 752 if (ret) {
714 printf("MXS NAND: BCH read timeout\n"); 753 printf("MXS NAND: BCH read timeout\n");
715 goto rtn; 754 goto rtn;
716 } 755 }
717 756
757 mxs_nand_return_dma_descs(nand_info);
758
718 /* Invalidate caches */ 759 /* Invalidate caches */
719 mxs_nand_inval_data_buf(nand_info); 760 mxs_nand_inval_data_buf(nand_info);
720 761
721 /* Read DMA completed, now do the mark swapping. */ 762 /* Read DMA completed, now do the mark swapping. */
722 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf); 763 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);
723 764
724 /* Loop over status bytes, accumulating ECC status. */ 765 /* Loop over status bytes, accumulating ECC status. */
725 status = nand_info->oob_buf + mxs_nand_aux_status_offset(); 766 status = nand_info->oob_buf + mxs_nand_aux_status_offset();
726 for (i = 0; i < geo->ecc_chunk_count; i++) { 767 for (i = 0; i < geo->ecc_chunk_count; i++) {
727 if (status[i] == 0x00) 768 if (status[i] == 0x00)
728 continue; 769 continue;
729 770
730 if (status[i] == 0xff) 771 if (status[i] == 0xff)
731 continue; 772 continue;
732 773
733 if (status[i] == 0xfe) { 774 if (status[i] == 0xfe) {
775 if (mxs_nand_erased_page(mtd, nand,
776 nand_info->data_buf, i, page))
777 break;
734 failed++; 778 failed++;
735 continue; 779 continue;
736 } 780 }
737 781
738 corrected += status[i]; 782 corrected += status[i];
739 } 783 }
740 784
741 /* Propagate ECC status to the owning MTD. */ 785 /* Propagate ECC status to the owning MTD. */
742 mtd->ecc_stats.failed += failed; 786 mtd->ecc_stats.failed += failed;
743 mtd->ecc_stats.corrected += corrected; 787 mtd->ecc_stats.corrected += corrected;
744 788
745 /* 789 /*
746 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for 790 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
747 * details about our policy for delivering the OOB. 791 * details about our policy for delivering the OOB.
748 * 792 *
749 * We fill the caller's buffer with set bits, and then copy the block 793 * We fill the caller's buffer with set bits, and then copy the block
750 * mark to the caller's buffer. Note that, if block mark swapping was 794 * mark to the caller's buffer. Note that, if block mark swapping was
751 * necessary, it has already been done, so we can rely on the first 795 * necessary, it has already been done, so we can rely on the first
752 * byte of the auxiliary buffer to contain the block mark. 796 * byte of the auxiliary buffer to contain the block mark.
753 */ 797 */
754 memset(nand->oob_poi, 0xff, mtd->oobsize); 798 memset(nand->oob_poi, 0xff, mtd->oobsize);
755 799
756 nand->oob_poi[0] = nand_info->oob_buf[0]; 800 nand->oob_poi[0] = nand_info->oob_buf[0];
757 801
758 memcpy(buf, nand_info->data_buf, mtd->writesize); 802 memcpy(buf, nand_info->data_buf, mtd->writesize);
759 803
760 rtn: 804 rtn:
761 mxs_nand_return_dma_descs(nand_info); 805 mxs_nand_return_dma_descs(nand_info);
762 806
763 return ret; 807 return ret;
764 } 808 }
765 809
766 /* 810 /*
767 * Write a page to NAND. 811 * Write a page to NAND.
768 */ 812 */
769 static int mxs_nand_ecc_write_page(struct mtd_info *mtd, 813 static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
770 struct nand_chip *nand, const uint8_t *buf, 814 struct nand_chip *nand, const uint8_t *buf,
771 int oob_required, int page) 815 int oob_required, int page)
772 { 816 {
773 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 817 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
774 struct bch_geometry *geo = &nand_info->bch_geometry; 818 struct bch_geometry *geo = &nand_info->bch_geometry;
775 struct mxs_dma_desc *d; 819 struct mxs_dma_desc *d;
776 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 820 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
777 int ret; 821 int ret;
778 822
779 memcpy(nand_info->data_buf, buf, mtd->writesize); 823 memcpy(nand_info->data_buf, buf, mtd->writesize);
780 memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize); 824 memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);
781 825
782 /* Handle block mark swapping. */ 826 /* Handle block mark swapping. */
783 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf); 827 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);
784 828
785 /* Compile the DMA descriptor - write data. */ 829 /* Compile the DMA descriptor - write data. */
786 d = mxs_nand_get_dma_desc(nand_info); 830 d = mxs_nand_get_dma_desc(nand_info);
787 d->cmd.data = 831 d->cmd.data =
788 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | 832 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
789 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | 833 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
790 (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 834 (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
791 835
792 d->cmd.address = 0; 836 d->cmd.address = 0;
793 837
794 d->cmd.pio_words[0] = 838 d->cmd.pio_words[0] =
795 GPMI_CTRL0_COMMAND_MODE_WRITE | 839 GPMI_CTRL0_COMMAND_MODE_WRITE |
796 GPMI_CTRL0_WORD_LENGTH | 840 GPMI_CTRL0_WORD_LENGTH |
797 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 841 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
798 GPMI_CTRL0_ADDRESS_NAND_DATA; 842 GPMI_CTRL0_ADDRESS_NAND_DATA;
799 d->cmd.pio_words[1] = 0; 843 d->cmd.pio_words[1] = 0;
800 d->cmd.pio_words[2] = 844 d->cmd.pio_words[2] =
801 GPMI_ECCCTRL_ENABLE_ECC | 845 GPMI_ECCCTRL_ENABLE_ECC |
802 GPMI_ECCCTRL_ECC_CMD_ENCODE | 846 GPMI_ECCCTRL_ECC_CMD_ENCODE |
803 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE; 847 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
804 d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize); 848 d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
805 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf; 849 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
806 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf; 850 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;
807 851
808 if (is_mx7() && nand_info->en_randomizer) { 852 if (is_mx7() && nand_info->en_randomizer) {
809 d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE | 853 d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
810 GPMI_ECCCTRL_RANDOMIZER_TYPE2; 854 GPMI_ECCCTRL_RANDOMIZER_TYPE2;
811 /* 855 /*
812 * Write NAND page number needed to be randomized 856 * Write NAND page number needed to be randomized
813 * to GPMI_ECCCOUNT register. 857 * to GPMI_ECCCOUNT register.
814 * 858 *
815 * The value is between 0-255. For additional details 859 * The value is between 0-255. For additional details
816 * check 9.6.6.4 of i.MX7D Applications Processor reference 860 * check 9.6.6.4 of i.MX7D Applications Processor reference
817 */ 861 */
818 d->cmd.pio_words[3] |= (page % 255) << 16; 862 d->cmd.pio_words[3] |= (page % 255) << 16;
819 } 863 }
820 864
821 mxs_dma_desc_append(channel, d); 865 mxs_dma_desc_append(channel, d);
822 866
823 /* Flush caches */ 867 /* Flush caches */
824 mxs_nand_flush_data_buf(nand_info); 868 mxs_nand_flush_data_buf(nand_info);
825 869
826 /* Execute the DMA chain. */ 870 /* Execute the DMA chain. */
827 ret = mxs_dma_go(channel); 871 ret = mxs_dma_go(channel);
828 if (ret) { 872 if (ret) {
829 printf("MXS NAND: DMA write error\n"); 873 printf("MXS NAND: DMA write error\n");
830 goto rtn; 874 goto rtn;
831 } 875 }
832 876
833 ret = mxs_nand_wait_for_bch_complete(nand_info); 877 ret = mxs_nand_wait_for_bch_complete(nand_info);
834 if (ret) { 878 if (ret) {
835 printf("MXS NAND: BCH write timeout\n"); 879 printf("MXS NAND: BCH write timeout\n");
836 goto rtn; 880 goto rtn;
837 } 881 }
838 882
839 rtn: 883 rtn:
840 mxs_nand_return_dma_descs(nand_info); 884 mxs_nand_return_dma_descs(nand_info);
841 return 0; 885 return 0;
842 } 886 }

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * a "raw" read or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}
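
#if 0	/* Illustrative sketch only -- not compiled into the driver. */
/*
 * Example of how the raw_oob_mode plumbing above is exercised; the helper
 * name and buffer handling are caller-side assumptions, not driver code.
 * mtd->_read_oob was replaced by mxs_nand_hook_read_oob() in
 * mxs_nand_setup_ecc(), so a MTD_OPS_RAW request sets raw_oob_mode = 1
 * before control reaches mxs_nand_ecc_read_oob(), which then returns the
 * physical OOB bytes instead of 0xFF fill.
 */
static int example_raw_oob_read(struct mtd_info *mtd, uint8_t *oobbuf)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_RAW,	/* request the raw view */
		.ooblen	= mtd->oobsize,
		.oobbuf	= oobbuf,	/* assumed caller-provided buffer */
	};

	return mtd_read_oob(mtd, 0, &ops);
}
#endif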

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
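
#if 0	/* Illustrative sketch only -- not compiled into the driver. */
/*
 * The one supported OOB write, seen from the caller's side (the helper
 * name is hypothetical): mtd_block_markbad() lands in
 * mxs_nand_hook_block_markbad(), which sets marking_block_bad = 1 around
 * the call chain, so the guard in mxs_nand_ecc_write_oob() passes and a
 * 0x00 block mark is programmed at offset mtd->writesize.
 */
static int example_mark_block_bad(struct mtd_info *mtd, loff_t block_ofs)
{
	return mtd_block_markbad(mtd, block_ofs);
}
#endif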

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
		printf("unsupported NAND chip, minimum ECC strength required: %d\n",
		       chip->ecc_strength_ds);
		return -EINVAL;
	}

	if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) &&
	     (mtd->oobsize < 1024)) || nand_info->legacy_bch_geometry) {
		dev_warn(nand_info->dev, "use legacy bch geometry\n");
		return mxs_nand_legacy_calc_ecc_layout(geo, mtd);
	}

	if (mtd->oobsize > 1024 || chip->ecc_step_ds < mtd->oobsize)
		return mxs_nand_calc_ecc_for_large_oob(geo, mtd);

	return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
						chip->ecc_strength_ds,
						chip->ecc_step_ds);
}
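
/*
 * Decision summary for mxs_nand_set_geometry(), restating the checks above:
 * if the chip advertises no usable ECC requirement and the OOB is smaller
 * than 1 KiB, or the legacy geometry is explicitly requested, fall back to
 * the legacy layout; pages with an OOB larger than 1 KiB (or an ECC step
 * smaller than the OOB) take the large-OOB layout; otherwise the layout is
 * derived from the chip's advertised ECC strength and step size.
 */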

/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	return 0;
}
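
/*
 * Worked example for the register packing above (illustrative numbers,
 * assuming a 4096+256 page, 512-byte chunks, ECC strength 16, GF(13)):
 *
 *   NBLOCKS        = ecc_chunk_count - 1 = 8 - 1 = 7
 *   ECC0 / ECCN    = ecc_strength >> 1   = 16 / 2 = 8
 *   DATA0 / DATAN  = 512 >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT
 *   PAGE_SIZE      = 4096 + 256 = 4352
 *   GF13_0_GF14_1  = 0 (gf_len == 13)
 *
 * The ECC fields hold half the strength because the BCH block only
 * supports even ECC levels; the GF bit selects 13- or 14-bit parity
 * symbols per correction.
 */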

/*
 * Allocate DMA buffers
 */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffers */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;

	/* Command buffers */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				      MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}
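
/*
 * Resulting layout of the single DMA allocation made above:
 *
 *   data_buf -> [0 .................. NAND_MAX_PAGESIZE)     page data
 *   oob_buf  -> [NAND_MAX_PAGESIZE ... + NAND_MAX_OOBSIZE)   OOB bytes
 *
 * Keeping both in one MXS_DMA_ALIGNMENT-aligned block lets the cache
 * maintenance in mxs_nand_flush_data_buf() cover the whole DMA window
 * in one operation.
 */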

/*
 * Initializes the NFC hardware.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
			    MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
	     j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}

int mxs_nand_init_spl(struct nand_chip *nand)
{
	struct mxs_nand_info *nand_info;
	int err;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return -ENOMEM;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	if (is_mx6sx() || is_mx7())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		return err;

	nand_set_controller_data(nand, nand_info);

	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;
	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;

	nand->read_byte = mxs_nand_read_byte;
	nand->read_buf = mxs_nand_read_buf;

	nand->ecc.read_page = mxs_nand_ecc_read_page;

	nand->ecc.mode = NAND_ECC_HW;

	return 0;
}
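
#if 0	/* Illustrative sketch only -- not compiled into the driver. */
/*
 * Minimal SPL-side usage, assuming the SPL NAND loader owns the chip
 * structure (the names here are hypothetical). Note that the SPL path
 * above installs only ecc.read_page: SPL reads the payload, it never
 * writes NAND.
 */
static struct nand_chip spl_nand_chip;

static int example_spl_nand_probe(void)
{
	return mxs_nand_init_spl(&spl_nand_chip);
}
#endif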

int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
{
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int err;

	nand = &nand_info->chip;
	mtd = nand_to_mtd(nand);

	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		goto err_free_buffers;

	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
#endif

	nand_set_controller_data(nand, nand_info);
	nand->options |= NAND_NO_SUBPAGE_WRITE;

	if (nand_info->dev)
		nand->flash_node = dev_of_offset(nand_info->dev);

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;

	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;
	nand->block_bad = mxs_nand_block_bad;

	nand->read_byte = mxs_nand_read_byte;

	nand->read_buf = mxs_nand_read_buf;
	nand->write_buf = mxs_nand_write_buf;

	/* First scan to find the device and get the page size. */
	err = nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL);
	if (err)
		goto err_free_buffers;

	err = mxs_nand_setup_ecc(mtd);
	if (err)
		goto err_free_buffers;

	nand->ecc.read_page = mxs_nand_ecc_read_page;
	nand->ecc.write_page = mxs_nand_ecc_write_page;
	nand->ecc.read_oob = mxs_nand_ecc_read_oob;
	nand->ecc.write_oob = mxs_nand_ecc_write_oob;

	nand->ecc.layout = &fake_ecc_layout;
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
	nand->ecc.strength = nand_info->bch_geometry.ecc_strength;

	/* Second phase scan. */
	err = nand_scan_tail(mtd);
	if (err)
		goto err_free_buffers;

	err = nand_register(0, mtd);
	if (err)
		goto err_free_buffers;

	return 0;

err_free_buffers:
	free(nand_info->data_buf);
	free(nand_info->cmd_buf);

	return err;
}

#ifndef CONFIG_NAND_MXS_DT
void board_nand_init(void)
{
	struct mxs_nand_info *nand_info;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	/* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
	if (is_mx6sx() || is_mx7())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
	nand_info->use_minimum_ecc = true;
#endif

	if (mxs_nand_init_ctrl(nand_info) < 0)
		goto err;

	return;

err:
	free(nand_info);
}
#endif

/*
 * Read NAND layout for FCB block generation.
 */
void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	u32 tmp;

	tmp = readl(&bch_regs->hw_bch_flash0layout0);
	l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
		     BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
		       BCH_FLASHLAYOUT0_META_SIZE_OFFSET;

	tmp = readl(&bch_regs->hw_bch_flash0layout1);
	l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
			     BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
	l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
		  BCH_FLASHLAYOUT0_ECC0_OFFSET;
	l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
			     BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
	l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
		  BCH_FLASHLAYOUT1_ECCN_OFFSET;
}
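
/*
 * Note on the "4 *" scaling above: the BCH DATA0_SIZE/DATAN_SIZE fields
 * store chunk sizes in 32-bit words, hence the multiplication by four to
 * convert back to bytes (mxs_nand_mode_fcb() below programs 0x20 words,
 * i.e. 128 bytes, the same way).
 */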

/*
 * Set BCH to specific layout used by ROM bootloader to read FCB.
 */
void mxs_nand_mode_fcb(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 1;

	mtd->writesize = 1024;
	mtd->oobsize = 1862 - 1024;

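	/*
	 * The layout programmed below describes a 1862-byte virtual page
	 * (illustrative arithmetic, consistent with the comments that follow):
	 *
	 *   metadata:                  32 bytes
	 *   data:   8 x 128          1024 bytes (0x20 words of 4 bytes/chunk)
	 *   parity: 8 x 62 x 13 bits 6448 bits = 806 bytes (ECC62, GF13)
	 *   total:                   1862 bytes = 1024 writesize + 838 oobsize
	 */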
	/* 8 ECC chunks */
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes for metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* ECC level 62 (the register encodes strength / 2, so 0x1F) */
	tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* data0 block: 0x20 words of 4 bytes each (128 bytes) */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* 1024 bytes for data + 838 bytes for OOB */
	tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* ECC level 62 (the register encodes strength / 2, so 0x1F) */
	tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* datan blocks: 0x20 words of 4 bytes each (128 bytes) */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}

/*
 * Restore BCH to normal settings.
 */
void mxs_nand_mode_normal(struct mtd_info *mtd)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 0;

	mtd->writesize = nand_info->writesize;
	mtd->oobsize = nand_info->oobsize;

	writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
	writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
}

uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_byte_offset;
}

uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_bit_offset;
}