Commit 00f5e098a16411ec5d5508815d17edf5bbd79909

Authored by Philip, Avinash
1 parent 05d90d89f9
Exists in master

arm:omap:nand - BCH8 read path corrected.

The NAND read path for BCH8 is corrected to handle reading of erased pages.
Previously, on a NAND read, the ECC was taken from the wrong offset in the
oob_poi buffer; this yielded non-0xff data and hence caused an ECC
uncorrectable error on erased pages.

Signed-off-by: Philip, Avinash <avinashphilip@ti.com>

Showing 1 changed file with 3 additions and 4 deletions Inline Diff

drivers/mtd/nand/omap2.c
1 /* 1 /*
2 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com> 2 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
3 * Copyright © 2004 Micron Technology Inc. 3 * Copyright © 2004 Micron Technology Inc.
4 * Copyright © 2004 David Brownell 4 * Copyright © 2004 David Brownell
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11 #include <linux/platform_device.h> 11 #include <linux/platform_device.h>
12 #include <linux/dma-mapping.h> 12 #include <linux/dma-mapping.h>
13 #include <linux/delay.h> 13 #include <linux/delay.h>
14 #include <linux/interrupt.h> 14 #include <linux/interrupt.h>
15 #include <linux/jiffies.h> 15 #include <linux/jiffies.h>
16 #include <linux/sched.h> 16 #include <linux/sched.h>
17 #include <linux/mtd/mtd.h> 17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/nand.h> 18 #include <linux/mtd/nand.h>
19 #include <linux/mtd/partitions.h> 19 #include <linux/mtd/partitions.h>
20 #include <linux/io.h> 20 #include <linux/io.h>
21 #include <linux/slab.h> 21 #include <linux/slab.h>
22 22
23 #include <plat/dma.h> 23 #include <plat/dma.h>
24 #include <plat/gpmc.h> 24 #include <plat/gpmc.h>
25 #include <plat/nand.h> 25 #include <plat/nand.h>
26 #include <plat/elm.h> 26 #include <plat/elm.h>
27 27
28 #define DRIVER_NAME "omap2-nand" 28 #define DRIVER_NAME "omap2-nand"
29 #define OMAP_NAND_TIMEOUT_MS 5000 29 #define OMAP_NAND_TIMEOUT_MS 5000
30 30
31 #define BCH8_ECC_BYTES (512) 31 #define BCH8_ECC_BYTES (512)
32 #define BCH8_ECC_OOB_BYTES (13) 32 #define BCH8_ECC_OOB_BYTES (13)
33 #define BCH8_ECC_MAX ((BCH8_ECC_BYTES + BCH8_ECC_OOB_BYTES) * 8) 33 #define BCH8_ECC_MAX ((BCH8_ECC_BYTES + BCH8_ECC_OOB_BYTES) * 8)
34 34
35 #define NAND_Ecc_P1e (1 << 0) 35 #define NAND_Ecc_P1e (1 << 0)
36 #define NAND_Ecc_P2e (1 << 1) 36 #define NAND_Ecc_P2e (1 << 1)
37 #define NAND_Ecc_P4e (1 << 2) 37 #define NAND_Ecc_P4e (1 << 2)
38 #define NAND_Ecc_P8e (1 << 3) 38 #define NAND_Ecc_P8e (1 << 3)
39 #define NAND_Ecc_P16e (1 << 4) 39 #define NAND_Ecc_P16e (1 << 4)
40 #define NAND_Ecc_P32e (1 << 5) 40 #define NAND_Ecc_P32e (1 << 5)
41 #define NAND_Ecc_P64e (1 << 6) 41 #define NAND_Ecc_P64e (1 << 6)
42 #define NAND_Ecc_P128e (1 << 7) 42 #define NAND_Ecc_P128e (1 << 7)
43 #define NAND_Ecc_P256e (1 << 8) 43 #define NAND_Ecc_P256e (1 << 8)
44 #define NAND_Ecc_P512e (1 << 9) 44 #define NAND_Ecc_P512e (1 << 9)
45 #define NAND_Ecc_P1024e (1 << 10) 45 #define NAND_Ecc_P1024e (1 << 10)
46 #define NAND_Ecc_P2048e (1 << 11) 46 #define NAND_Ecc_P2048e (1 << 11)
47 47
48 #define NAND_Ecc_P1o (1 << 16) 48 #define NAND_Ecc_P1o (1 << 16)
49 #define NAND_Ecc_P2o (1 << 17) 49 #define NAND_Ecc_P2o (1 << 17)
50 #define NAND_Ecc_P4o (1 << 18) 50 #define NAND_Ecc_P4o (1 << 18)
51 #define NAND_Ecc_P8o (1 << 19) 51 #define NAND_Ecc_P8o (1 << 19)
52 #define NAND_Ecc_P16o (1 << 20) 52 #define NAND_Ecc_P16o (1 << 20)
53 #define NAND_Ecc_P32o (1 << 21) 53 #define NAND_Ecc_P32o (1 << 21)
54 #define NAND_Ecc_P64o (1 << 22) 54 #define NAND_Ecc_P64o (1 << 22)
55 #define NAND_Ecc_P128o (1 << 23) 55 #define NAND_Ecc_P128o (1 << 23)
56 #define NAND_Ecc_P256o (1 << 24) 56 #define NAND_Ecc_P256o (1 << 24)
57 #define NAND_Ecc_P512o (1 << 25) 57 #define NAND_Ecc_P512o (1 << 25)
58 #define NAND_Ecc_P1024o (1 << 26) 58 #define NAND_Ecc_P1024o (1 << 26)
59 #define NAND_Ecc_P2048o (1 << 27) 59 #define NAND_Ecc_P2048o (1 << 27)
60 60
61 #define TF(value) (value ? 1 : 0) 61 #define TF(value) (value ? 1 : 0)
62 62
63 #define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0) 63 #define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
64 #define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1) 64 #define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
65 #define P1e(a) (TF(a & NAND_Ecc_P1e) << 2) 65 #define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
66 #define P1o(a) (TF(a & NAND_Ecc_P1o) << 3) 66 #define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
67 #define P2e(a) (TF(a & NAND_Ecc_P2e) << 4) 67 #define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
68 #define P2o(a) (TF(a & NAND_Ecc_P2o) << 5) 68 #define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
69 #define P4e(a) (TF(a & NAND_Ecc_P4e) << 6) 69 #define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
70 #define P4o(a) (TF(a & NAND_Ecc_P4o) << 7) 70 #define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
71 71
72 #define P8e(a) (TF(a & NAND_Ecc_P8e) << 0) 72 #define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
73 #define P8o(a) (TF(a & NAND_Ecc_P8o) << 1) 73 #define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
74 #define P16e(a) (TF(a & NAND_Ecc_P16e) << 2) 74 #define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
75 #define P16o(a) (TF(a & NAND_Ecc_P16o) << 3) 75 #define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
76 #define P32e(a) (TF(a & NAND_Ecc_P32e) << 4) 76 #define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
77 #define P32o(a) (TF(a & NAND_Ecc_P32o) << 5) 77 #define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
78 #define P64e(a) (TF(a & NAND_Ecc_P64e) << 6) 78 #define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
79 #define P64o(a) (TF(a & NAND_Ecc_P64o) << 7) 79 #define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
80 80
81 #define P128e(a) (TF(a & NAND_Ecc_P128e) << 0) 81 #define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
82 #define P128o(a) (TF(a & NAND_Ecc_P128o) << 1) 82 #define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
83 #define P256e(a) (TF(a & NAND_Ecc_P256e) << 2) 83 #define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
84 #define P256o(a) (TF(a & NAND_Ecc_P256o) << 3) 84 #define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
85 #define P512e(a) (TF(a & NAND_Ecc_P512e) << 4) 85 #define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
86 #define P512o(a) (TF(a & NAND_Ecc_P512o) << 5) 86 #define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
87 #define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6) 87 #define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
88 #define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7) 88 #define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
89 89
90 #define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0) 90 #define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
91 #define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1) 91 #define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
92 #define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2) 92 #define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
93 #define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3) 93 #define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
94 #define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4) 94 #define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
95 #define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5) 95 #define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
96 #define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6) 96 #define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
97 #define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7) 97 #define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
98 98
99 #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0) 99 #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
100 #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1) 100 #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
101 101
102 #define MAX_HWECC_BYTES_OOB_64 24 102 #define MAX_HWECC_BYTES_OOB_64 24
103 #define JFFS2_CLEAN_MARKER_OFFSET 0x2 103 #define JFFS2_CLEAN_MARKER_OFFSET 0x2
104 #define BCH_ECC_POS 0x2 104 #define BCH_ECC_POS 0x2
105 #define BCH_JFFS2_CLEAN_MARKER_OFFSET 0x3a 105 #define BCH_JFFS2_CLEAN_MARKER_OFFSET 0x3a
106 106
107 static const char *part_probes[] = { "cmdlinepart", NULL }; 107 static const char *part_probes[] = { "cmdlinepart", NULL };
108 108
109 int decode_bch(int select_4_8, unsigned char *ecc, unsigned int *err_loc); 109 int decode_bch(int select_4_8, unsigned char *ecc, unsigned int *err_loc);
110 110
111 /* oob info generated runtime depending on ecc algorithm and layout selected */ 111 /* oob info generated runtime depending on ecc algorithm and layout selected */
112 static struct nand_ecclayout omap_oobinfo; 112 static struct nand_ecclayout omap_oobinfo;
113 /* Define some generic bad / good block scan pattern which are used 113 /* Define some generic bad / good block scan pattern which are used
114 * while scanning a device for factory marked good / bad blocks 114 * while scanning a device for factory marked good / bad blocks
115 */ 115 */
116 static uint8_t scan_ff_pattern[] = { 0xff }; 116 static uint8_t scan_ff_pattern[] = { 0xff };
117 static struct nand_bbt_descr bb_descrip_flashbased = { 117 static struct nand_bbt_descr bb_descrip_flashbased = {
118 .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, 118 .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
119 .offs = 0, 119 .offs = 0,
120 .len = 1, 120 .len = 1,
121 .pattern = scan_ff_pattern, 121 .pattern = scan_ff_pattern,
122 }; 122 };
123 123
124 124
125 struct omap_nand_info { 125 struct omap_nand_info {
126 struct nand_hw_control controller; 126 struct nand_hw_control controller;
127 struct omap_nand_platform_data *pdata; 127 struct omap_nand_platform_data *pdata;
128 struct mtd_info mtd; 128 struct mtd_info mtd;
129 struct mtd_partition *parts; 129 struct mtd_partition *parts;
130 struct nand_chip nand; 130 struct nand_chip nand;
131 struct platform_device *pdev; 131 struct platform_device *pdev;
132 132
133 int gpmc_cs; 133 int gpmc_cs;
134 unsigned long phys_base; 134 unsigned long phys_base;
135 struct completion comp; 135 struct completion comp;
136 int dma_ch; 136 int dma_ch;
137 int gpmc_irq; 137 int gpmc_irq;
138 enum { 138 enum {
139 OMAP_NAND_IO_READ = 0, /* read */ 139 OMAP_NAND_IO_READ = 0, /* read */
140 OMAP_NAND_IO_WRITE, /* write */ 140 OMAP_NAND_IO_WRITE, /* write */
141 } iomode; 141 } iomode;
142 u_char *buf; 142 u_char *buf;
143 int buf_len; 143 int buf_len;
144 int ecc_opt; 144 int ecc_opt;
145 }; 145 };
146 146
147 /** 147 /**
148 * omap_hwcontrol - hardware specific access to control-lines 148 * omap_hwcontrol - hardware specific access to control-lines
149 * @mtd: MTD device structure 149 * @mtd: MTD device structure
150 * @cmd: command to device 150 * @cmd: command to device
151 * @ctrl: 151 * @ctrl:
152 * NAND_NCE: bit 0 -> don't care 152 * NAND_NCE: bit 0 -> don't care
153 * NAND_CLE: bit 1 -> Command Latch 153 * NAND_CLE: bit 1 -> Command Latch
154 * NAND_ALE: bit 2 -> Address Latch 154 * NAND_ALE: bit 2 -> Address Latch
155 * 155 *
156 * NOTE: boards may use different bits for these!! 156 * NOTE: boards may use different bits for these!!
157 */ 157 */
158 static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) 158 static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
159 { 159 {
160 struct omap_nand_info *info = container_of(mtd, 160 struct omap_nand_info *info = container_of(mtd,
161 struct omap_nand_info, mtd); 161 struct omap_nand_info, mtd);
162 162
163 if (cmd != NAND_CMD_NONE) { 163 if (cmd != NAND_CMD_NONE) {
164 if (ctrl & NAND_CLE) 164 if (ctrl & NAND_CLE)
165 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd); 165 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
166 166
167 else if (ctrl & NAND_ALE) 167 else if (ctrl & NAND_ALE)
168 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd); 168 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
169 169
170 else /* NAND_NCE */ 170 else /* NAND_NCE */
171 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd); 171 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
172 } 172 }
173 } 173 }
174 174
175 /** 175 /**
176 * omap_read_buf8 - read data from NAND controller into buffer 176 * omap_read_buf8 - read data from NAND controller into buffer
177 * @mtd: MTD device structure 177 * @mtd: MTD device structure
178 * @buf: buffer to store date 178 * @buf: buffer to store date
179 * @len: number of bytes to read 179 * @len: number of bytes to read
180 */ 180 */
181 static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len) 181 static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
182 { 182 {
183 struct nand_chip *nand = mtd->priv; 183 struct nand_chip *nand = mtd->priv;
184 184
185 ioread8_rep(nand->IO_ADDR_R, buf, len); 185 ioread8_rep(nand->IO_ADDR_R, buf, len);
186 } 186 }
187 187
188 /** 188 /**
189 * omap_write_buf8 - write buffer to NAND controller 189 * omap_write_buf8 - write buffer to NAND controller
190 * @mtd: MTD device structure 190 * @mtd: MTD device structure
191 * @buf: data buffer 191 * @buf: data buffer
192 * @len: number of bytes to write 192 * @len: number of bytes to write
193 */ 193 */
194 static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len) 194 static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
195 { 195 {
196 struct omap_nand_info *info = container_of(mtd, 196 struct omap_nand_info *info = container_of(mtd,
197 struct omap_nand_info, mtd); 197 struct omap_nand_info, mtd);
198 u_char *p = (u_char *)buf; 198 u_char *p = (u_char *)buf;
199 u32 status = 0; 199 u32 status = 0;
200 200
201 while (len--) { 201 while (len--) {
202 iowrite8(*p++, info->nand.IO_ADDR_W); 202 iowrite8(*p++, info->nand.IO_ADDR_W);
203 /* wait until buffer is available for write */ 203 /* wait until buffer is available for write */
204 do { 204 do {
205 status = gpmc_read_status(GPMC_STATUS_BUFFER); 205 status = gpmc_read_status(GPMC_STATUS_BUFFER);
206 } while (!status); 206 } while (!status);
207 } 207 }
208 } 208 }
209 209
210 /** 210 /**
211 * omap_read_buf16 - read data from NAND controller into buffer 211 * omap_read_buf16 - read data from NAND controller into buffer
212 * @mtd: MTD device structure 212 * @mtd: MTD device structure
213 * @buf: buffer to store date 213 * @buf: buffer to store date
214 * @len: number of bytes to read 214 * @len: number of bytes to read
215 */ 215 */
216 static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len) 216 static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
217 { 217 {
218 struct nand_chip *nand = mtd->priv; 218 struct nand_chip *nand = mtd->priv;
219 219
220 ioread16_rep(nand->IO_ADDR_R, buf, len / 2); 220 ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
221 } 221 }
222 222
223 /** 223 /**
224 * omap_write_buf16 - write buffer to NAND controller 224 * omap_write_buf16 - write buffer to NAND controller
225 * @mtd: MTD device structure 225 * @mtd: MTD device structure
226 * @buf: data buffer 226 * @buf: data buffer
227 * @len: number of bytes to write 227 * @len: number of bytes to write
228 */ 228 */
229 static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len) 229 static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
230 { 230 {
231 struct omap_nand_info *info = container_of(mtd, 231 struct omap_nand_info *info = container_of(mtd,
232 struct omap_nand_info, mtd); 232 struct omap_nand_info, mtd);
233 u16 *p = (u16 *) buf; 233 u16 *p = (u16 *) buf;
234 u32 status = 0; 234 u32 status = 0;
235 /* FIXME try bursts of writesw() or DMA ... */ 235 /* FIXME try bursts of writesw() or DMA ... */
236 len >>= 1; 236 len >>= 1;
237 237
238 while (len--) { 238 while (len--) {
239 iowrite16(*p++, info->nand.IO_ADDR_W); 239 iowrite16(*p++, info->nand.IO_ADDR_W);
240 /* wait until buffer is available for write */ 240 /* wait until buffer is available for write */
241 do { 241 do {
242 status = gpmc_read_status(GPMC_STATUS_BUFFER); 242 status = gpmc_read_status(GPMC_STATUS_BUFFER);
243 } while (!status); 243 } while (!status);
244 } 244 }
245 } 245 }
246 246
247 /** 247 /**
248 * omap_read_buf_pref - read data from NAND controller into buffer 248 * omap_read_buf_pref - read data from NAND controller into buffer
249 * @mtd: MTD device structure 249 * @mtd: MTD device structure
250 * @buf: buffer to store date 250 * @buf: buffer to store date
251 * @len: number of bytes to read 251 * @len: number of bytes to read
252 */ 252 */
253 static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len) 253 static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
254 { 254 {
255 struct omap_nand_info *info = container_of(mtd, 255 struct omap_nand_info *info = container_of(mtd,
256 struct omap_nand_info, mtd); 256 struct omap_nand_info, mtd);
257 uint32_t r_count = 0; 257 uint32_t r_count = 0;
258 int ret = 0; 258 int ret = 0;
259 u32 *p = (u32 *)buf; 259 u32 *p = (u32 *)buf;
260 260
261 /* take care of subpage reads */ 261 /* take care of subpage reads */
262 if (len % 4) { 262 if (len % 4) {
263 if (info->nand.options & NAND_BUSWIDTH_16) 263 if (info->nand.options & NAND_BUSWIDTH_16)
264 omap_read_buf16(mtd, buf, len % 4); 264 omap_read_buf16(mtd, buf, len % 4);
265 else 265 else
266 omap_read_buf8(mtd, buf, len % 4); 266 omap_read_buf8(mtd, buf, len % 4);
267 p = (u32 *) (buf + len % 4); 267 p = (u32 *) (buf + len % 4);
268 len -= len % 4; 268 len -= len % 4;
269 } 269 }
270 270
271 /* configure and start prefetch transfer */ 271 /* configure and start prefetch transfer */
272 ret = gpmc_prefetch_enable(info->gpmc_cs, 272 ret = gpmc_prefetch_enable(info->gpmc_cs,
273 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0); 273 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
274 if (ret) { 274 if (ret) {
275 /* PFPW engine is busy, use cpu copy method */ 275 /* PFPW engine is busy, use cpu copy method */
276 if (info->nand.options & NAND_BUSWIDTH_16) 276 if (info->nand.options & NAND_BUSWIDTH_16)
277 omap_read_buf16(mtd, (u_char *)p, len); 277 omap_read_buf16(mtd, (u_char *)p, len);
278 else 278 else
279 omap_read_buf8(mtd, (u_char *)p, len); 279 omap_read_buf8(mtd, (u_char *)p, len);
280 } else { 280 } else {
281 do { 281 do {
282 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 282 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
283 r_count = r_count >> 2; 283 r_count = r_count >> 2;
284 ioread32_rep(info->nand.IO_ADDR_R, p, r_count); 284 ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
285 p += r_count; 285 p += r_count;
286 len -= r_count << 2; 286 len -= r_count << 2;
287 } while (len); 287 } while (len);
288 /* disable and stop the PFPW engine */ 288 /* disable and stop the PFPW engine */
289 gpmc_prefetch_reset(info->gpmc_cs); 289 gpmc_prefetch_reset(info->gpmc_cs);
290 } 290 }
291 } 291 }
292 292
293 /** 293 /**
294 * omap_write_buf_pref - write buffer to NAND controller 294 * omap_write_buf_pref - write buffer to NAND controller
295 * @mtd: MTD device structure 295 * @mtd: MTD device structure
296 * @buf: data buffer 296 * @buf: data buffer
297 * @len: number of bytes to write 297 * @len: number of bytes to write
298 */ 298 */
299 static void omap_write_buf_pref(struct mtd_info *mtd, 299 static void omap_write_buf_pref(struct mtd_info *mtd,
300 const u_char *buf, int len) 300 const u_char *buf, int len)
301 { 301 {
302 struct omap_nand_info *info = container_of(mtd, 302 struct omap_nand_info *info = container_of(mtd,
303 struct omap_nand_info, mtd); 303 struct omap_nand_info, mtd);
304 uint32_t w_count = 0; 304 uint32_t w_count = 0;
305 int i = 0, ret = 0; 305 int i = 0, ret = 0;
306 u16 *p = (u16 *)buf; 306 u16 *p = (u16 *)buf;
307 unsigned long tim, limit; 307 unsigned long tim, limit;
308 308
309 /* take care of subpage writes */ 309 /* take care of subpage writes */
310 if (len % 2 != 0) { 310 if (len % 2 != 0) {
311 writeb(*buf, info->nand.IO_ADDR_W); 311 writeb(*buf, info->nand.IO_ADDR_W);
312 p = (u16 *)(buf + 1); 312 p = (u16 *)(buf + 1);
313 len--; 313 len--;
314 } 314 }
315 315
316 /* configure and start prefetch transfer */ 316 /* configure and start prefetch transfer */
317 ret = gpmc_prefetch_enable(info->gpmc_cs, 317 ret = gpmc_prefetch_enable(info->gpmc_cs,
318 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1); 318 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
319 if (ret) { 319 if (ret) {
320 /* PFPW engine is busy, use cpu copy method */ 320 /* PFPW engine is busy, use cpu copy method */
321 if (info->nand.options & NAND_BUSWIDTH_16) 321 if (info->nand.options & NAND_BUSWIDTH_16)
322 omap_write_buf16(mtd, (u_char *)p, len); 322 omap_write_buf16(mtd, (u_char *)p, len);
323 else 323 else
324 omap_write_buf8(mtd, (u_char *)p, len); 324 omap_write_buf8(mtd, (u_char *)p, len);
325 } else { 325 } else {
326 while (len) { 326 while (len) {
327 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 327 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
328 w_count = w_count >> 1; 328 w_count = w_count >> 1;
329 for (i = 0; (i < w_count) && len; i++, len -= 2) 329 for (i = 0; (i < w_count) && len; i++, len -= 2)
330 iowrite16(*p++, info->nand.IO_ADDR_W); 330 iowrite16(*p++, info->nand.IO_ADDR_W);
331 } 331 }
332 /* wait for data to flushed-out before reset the prefetch */ 332 /* wait for data to flushed-out before reset the prefetch */
333 tim = 0; 333 tim = 0;
334 limit = (loops_per_jiffy * 334 limit = (loops_per_jiffy *
335 msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); 335 msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
336 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) 336 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
337 cpu_relax(); 337 cpu_relax();
338 338
339 /* disable and stop the PFPW engine */ 339 /* disable and stop the PFPW engine */
340 gpmc_prefetch_reset(info->gpmc_cs); 340 gpmc_prefetch_reset(info->gpmc_cs);
341 } 341 }
342 } 342 }
343 343
344 /* 344 /*
345 * omap_nand_dma_cb: callback on the completion of dma transfer 345 * omap_nand_dma_cb: callback on the completion of dma transfer
346 * @lch: logical channel 346 * @lch: logical channel
347 * @ch_satuts: channel status 347 * @ch_satuts: channel status
348 * @data: pointer to completion data structure 348 * @data: pointer to completion data structure
349 */ 349 */
350 static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) 350 static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
351 { 351 {
352 complete((struct completion *) data); 352 complete((struct completion *) data);
353 } 353 }
354 354
355 /* 355 /*
356 * omap_nand_dma_transfer: configer and start dma transfer 356 * omap_nand_dma_transfer: configer and start dma transfer
357 * @mtd: MTD device structure 357 * @mtd: MTD device structure
358 * @addr: virtual address in RAM of source/destination 358 * @addr: virtual address in RAM of source/destination
359 * @len: number of data bytes to be transferred 359 * @len: number of data bytes to be transferred
360 * @is_write: flag for read/write operation 360 * @is_write: flag for read/write operation
361 */ 361 */
362 static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, 362 static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
363 unsigned int len, int is_write) 363 unsigned int len, int is_write)
364 { 364 {
365 struct omap_nand_info *info = container_of(mtd, 365 struct omap_nand_info *info = container_of(mtd,
366 struct omap_nand_info, mtd); 366 struct omap_nand_info, mtd);
367 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : 367 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
368 DMA_FROM_DEVICE; 368 DMA_FROM_DEVICE;
369 dma_addr_t dma_addr; 369 dma_addr_t dma_addr;
370 int ret; 370 int ret;
371 unsigned long tim, limit; 371 unsigned long tim, limit;
372 372
373 /* The fifo depth is 64 bytes max. 373 /* The fifo depth is 64 bytes max.
374 * But configure the FIFO-threahold to 32 to get a sync at each frame 374 * But configure the FIFO-threahold to 32 to get a sync at each frame
375 * and frame length is 32 bytes. 375 * and frame length is 32 bytes.
376 */ 376 */
377 int buf_len = len >> 6; 377 int buf_len = len >> 6;
378 378
379 if (addr >= high_memory) { 379 if (addr >= high_memory) {
380 struct page *p1; 380 struct page *p1;
381 381
382 if (((size_t)addr & PAGE_MASK) != 382 if (((size_t)addr & PAGE_MASK) !=
383 ((size_t)(addr + len - 1) & PAGE_MASK)) 383 ((size_t)(addr + len - 1) & PAGE_MASK))
384 goto out_copy; 384 goto out_copy;
385 p1 = vmalloc_to_page(addr); 385 p1 = vmalloc_to_page(addr);
386 if (!p1) 386 if (!p1)
387 goto out_copy; 387 goto out_copy;
388 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); 388 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
389 } 389 }
390 390
391 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir); 391 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
392 if (dma_mapping_error(&info->pdev->dev, dma_addr)) { 392 if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
393 dev_err(&info->pdev->dev, 393 dev_err(&info->pdev->dev,
394 "Couldn't DMA map a %d byte buffer\n", len); 394 "Couldn't DMA map a %d byte buffer\n", len);
395 goto out_copy; 395 goto out_copy;
396 } 396 }
397 397
398 if (is_write) { 398 if (is_write) {
399 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, 399 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
400 info->phys_base, 0, 0); 400 info->phys_base, 0, 0);
401 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, 401 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
402 dma_addr, 0, 0); 402 dma_addr, 0, 0);
403 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, 403 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
404 0x10, buf_len, OMAP_DMA_SYNC_FRAME, 404 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
405 OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC); 405 OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
406 } else { 406 } else {
407 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, 407 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
408 info->phys_base, 0, 0); 408 info->phys_base, 0, 0);
409 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, 409 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
410 dma_addr, 0, 0); 410 dma_addr, 0, 0);
411 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, 411 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
412 0x10, buf_len, OMAP_DMA_SYNC_FRAME, 412 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
413 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); 413 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
414 } 414 }
415 /* configure and start prefetch transfer */ 415 /* configure and start prefetch transfer */
416 ret = gpmc_prefetch_enable(info->gpmc_cs, 416 ret = gpmc_prefetch_enable(info->gpmc_cs,
417 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); 417 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
418 if (ret) 418 if (ret)
419 /* PFPW engine is busy, use cpu copy method */ 419 /* PFPW engine is busy, use cpu copy method */
420 goto out_copy; 420 goto out_copy;
421 421
422 init_completion(&info->comp); 422 init_completion(&info->comp);
423 423
424 omap_start_dma(info->dma_ch); 424 omap_start_dma(info->dma_ch);
425 425
426 /* setup and start DMA using dma_addr */ 426 /* setup and start DMA using dma_addr */
427 wait_for_completion(&info->comp); 427 wait_for_completion(&info->comp);
428 tim = 0; 428 tim = 0;
429 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); 429 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
430 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) 430 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
431 cpu_relax(); 431 cpu_relax();
432 432
433 /* disable and stop the PFPW engine */ 433 /* disable and stop the PFPW engine */
434 gpmc_prefetch_reset(info->gpmc_cs); 434 gpmc_prefetch_reset(info->gpmc_cs);
435 435
436 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 436 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
437 return 0; 437 return 0;
438 438
439 out_copy: 439 out_copy:
440 if (info->nand.options & NAND_BUSWIDTH_16) 440 if (info->nand.options & NAND_BUSWIDTH_16)
441 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len) 441 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
442 : omap_write_buf16(mtd, (u_char *) addr, len); 442 : omap_write_buf16(mtd, (u_char *) addr, len);
443 else 443 else
444 is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len) 444 is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
445 : omap_write_buf8(mtd, (u_char *) addr, len); 445 : omap_write_buf8(mtd, (u_char *) addr, len);
446 return 0; 446 return 0;
447 } 447 }
448 448
449 /** 449 /**
450 * omap_read_buf_dma_pref - read data from NAND controller into buffer 450 * omap_read_buf_dma_pref - read data from NAND controller into buffer
451 * @mtd: MTD device structure 451 * @mtd: MTD device structure
452 * @buf: buffer to store date 452 * @buf: buffer to store date
453 * @len: number of bytes to read 453 * @len: number of bytes to read
454 */ 454 */
455 static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len) 455 static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
456 { 456 {
457 if (len <= mtd->oobsize) 457 if (len <= mtd->oobsize)
458 omap_read_buf_pref(mtd, buf, len); 458 omap_read_buf_pref(mtd, buf, len);
459 else 459 else
460 /* start transfer in DMA mode */ 460 /* start transfer in DMA mode */
461 omap_nand_dma_transfer(mtd, buf, len, 0x0); 461 omap_nand_dma_transfer(mtd, buf, len, 0x0);
462 } 462 }
463 463
464 /** 464 /**
465 * omap_write_buf_dma_pref - write buffer to NAND controller 465 * omap_write_buf_dma_pref - write buffer to NAND controller
466 * @mtd: MTD device structure 466 * @mtd: MTD device structure
467 * @buf: data buffer 467 * @buf: data buffer
468 * @len: number of bytes to write 468 * @len: number of bytes to write
469 */ 469 */
470 static void omap_write_buf_dma_pref(struct mtd_info *mtd, 470 static void omap_write_buf_dma_pref(struct mtd_info *mtd,
471 const u_char *buf, int len) 471 const u_char *buf, int len)
472 { 472 {
473 if (len <= mtd->oobsize) 473 if (len <= mtd->oobsize)
474 omap_write_buf_pref(mtd, buf, len); 474 omap_write_buf_pref(mtd, buf, len);
475 else 475 else
476 /* start transfer in DMA mode */ 476 /* start transfer in DMA mode */
477 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); 477 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
478 } 478 }
479 479
480 /* 480 /*
481 * omap_nand_irq - GMPC irq handler 481 * omap_nand_irq - GMPC irq handler
482 * @this_irq: gpmc irq number 482 * @this_irq: gpmc irq number
483 * @dev: omap_nand_info structure pointer is passed here 483 * @dev: omap_nand_info structure pointer is passed here
484 */ 484 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;
	u32 irq_stat;

	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
	bytes = bytes & 0xFFFC;	/* io in multiple of 4 bytes */
	if (info->iomode == OMAP_NAND_IO_WRITE) {	/* checks for write io */
		/* NOTE(review): bit 0x2 is treated as the terminal-count
		 * (transfer complete) status bit — presumably the status
		 * counterpart of GPMC_IRQ_COUNT_EVENT enabled by the
		 * callers; confirm against the GPMC TRM. */
		if (irq_stat & 0x2)
			goto done;

		/* Clamp to the bytes still pending so we never push past
		 * the caller's buffer; nothing left -> write nothing. */
		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		/* Feed the prefetch FIFO 32 bits at a time and advance the
		 * shared cursor/remaining-length state for the next irq. */
		iowrite32_rep(info->nand.IO_ADDR_W,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		/* Read path: drain whatever the FIFO reports as available,
		 * then check for completion (order differs from the write
		 * path: data must be drained before finishing). */
		ioread32_rep(info->nand.IO_ADDR_R,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;

		if (irq_stat & 0x2)
			goto done;
	}
	/* Transfer still in progress: ack this irq and wait for the next. */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;

done:
	/* Transfer complete: wake the sleeping reader/writer. */
	complete(&info->comp);
	/* disable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);

	/* clear status */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;
}
529 529
530 /* 530 /*
531 * omap_read_buf_irq_pref - read data from NAND controller into buffer 531 * omap_read_buf_irq_pref - read data from NAND controller into buffer
532 * @mtd: MTD device structure 532 * @mtd: MTD device structure
533 * @buf: buffer to store date 533 * @buf: buffer to store date
534 * @len: number of bytes to read 534 * @len: number of bytes to read
535 */ 535 */
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;

	/* Small (OOB-sized or less) reads: irq setup is not worth it, use
	 * the polled prefetch path instead. */
	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
		return;
	}

	/* Publish the transfer state consumed by omap_nand_irq(). */
	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	/* CPU-copy fallback, sized by the device bus width. */
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}
575 575
576 /* 576 /*
577 * omap_write_buf_irq_pref - write buffer to NAND controller 577 * omap_write_buf_irq_pref - write buffer to NAND controller
578 * @mtd: MTD device structure 578 * @mtd: MTD device structure
579 * @buf: data buffer 579 * @buf: data buffer
580 * @len: number of bytes to write 580 * @len: number of bytes to write
581 */ 581 */
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	unsigned long tim, limit;

	/* Small (OOB-sized or less) writes go through the polled path. */
	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
		return;
	}

	/* Publish the transfer state consumed by omap_nand_irq().
	 * The const is cast away because the irq handler advances the
	 * cursor; the data itself is never modified. */
	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for write to complete */
	wait_for_completion(&info->comp);
	/* wait for data to flushed-out before reset the prefetch */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
		cpu_relax();

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	/* CPU-copy fallback, sized by the device bus width. */
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}
629 629
630 /** 630 /**
631 * omap_verify_buf - Verify chip data against buffer 631 * omap_verify_buf - Verify chip data against buffer
632 * @mtd: MTD device structure 632 * @mtd: MTD device structure
633 * @buf: buffer containing the data to compare 633 * @buf: buffer containing the data to compare
634 * @len: number of bytes to compare 634 * @len: number of bytes to compare
635 */ 635 */
static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u16 *p = (u16 *) buf;

	/* Compare the buffer halfword-by-halfword against data streamed
	 * from the NAND data register.
	 * NOTE(review): len is halved by the shift, so a trailing odd byte
	 * is silently skipped, and the routine always performs 16-bit
	 * reads — presumably only wired up for x16 devices; confirm
	 * against where chip->verify_buf is assigned. */
	len >>= 1;
	while (len--) {
		if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
			return -EFAULT;
	}

	return 0;
}
650 650
651 /** 651 /**
652 * gen_true_ecc - This function will generate true ECC value 652 * gen_true_ecc - This function will generate true ECC value
653 * @ecc_buf: buffer to store ecc code 653 * @ecc_buf: buffer to store ecc code
654 * 654 *
655 * This generated true ECC value can be used when correcting 655 * This generated true ECC value can be used when correcting
656 * data read from NAND flash memory core 656 * data read from NAND flash memory core
657 */ 657 */
static void gen_true_ecc(u8 *ecc_buf)
{
	/* Repack the three raw ECC bytes into the bit positions the parity
	 * macros below expect.  tmp MUST be computed before the stores:
	 * ecc_buf is rewritten in place and the sources are clobbered. */
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	/* Rebuild each ECC byte from the individual even/odd parity bits
	 * (P*e/P*o macros, defined earlier in this file), inverted into
	 * the "true ECC" form used by omap_compare_ecc(). */
	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}
670 670
671 /** 671 /**
672 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data 672 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
673 * @ecc_data1: ecc code from nand spare area 673 * @ecc_data1: ecc code from nand spare area
674 * @ecc_data2: ecc code from hardware register obtained from hardware ecc 674 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
675 * @page_data: page data 675 * @page_data: page data
676 * 676 *
677 * This function compares two ECC's and indicates if there is an error. 677 * This function compares two ECC's and indicates if there is an error.
678 * If the error can be corrected it will be corrected to the buffer. 678 * If the error can be corrected it will be corrected to the buffer.
679 * If there is no error, %0 is returned. If there is an error but it 679 * If there is no error, %0 is returned. If there is an error but it
680 * was corrected, %1 is returned. Otherwise, %-1 is returned. 680 * was corrected, %1 is returned. Otherwise, %-1 is returned.
681 */ 681 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	/* Remember whether the stored ECC is all-0xFF (erased page) before
	 * the buffers are transformed below.
	 * NOTE(review): the u32 load through a u8 pointer is an unaligned,
	 * endian-dependent access — works here presumably because the
	 * buffer is word-aligned and the CPU is little-endian; it also
	 * reads one byte beyond the three ECC bytes (masked off). */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	/* Both ECC buffers are converted to "true ECC" form IN PLACE and
	 * then destructively decomposed bit-by-bit; callers must not reuse
	 * their contents afterwards. */
	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	/* Split each ECC byte into its 8 bits, LSB first (the %2 / /2
	 * pairs consume the bytes as they go). */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i]     = *ecc_data1 % 2;
		*ecc_data1	= *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i]     = *ecc_data2 % 2;
		*ecc_data2       = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i]     = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i]     = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	/* XOR stored vs. computed parity into the 24-entry syndrome,
	 * arranged in the column/row order the Hamming scheme defines. */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	/* Number of mismatching parity bits classifies the error. */
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 *  ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error: exactly one parity bit of each
		 * odd/even pair differs, so the odd bits spell out the
		 * failing byte offset and bit position directly. */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		/* Flip the failing bit in the caller's page buffer. */
		page_data[find_byte] ^= (1 << find_bit);

		return 1;
	default:
		/* Erased page: stored ECC was all-0xFF and the computed
		 * ECC is all-zero, so there is no real error. */
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}
798 798
799 /** 799 /**
800 * omap_read_page_bch - BCH ecc based page read function 800 * omap_read_page_bch - BCH ecc based page read function
801 * @mtd: mtd info structure 801 * @mtd: mtd info structure
802 * @chip: nand chip info structure 802 * @chip: nand chip info structure
803 * @buf: buffer to store read data 803 * @buf: buffer to store read data
804 * @page: page number to read 804 * @page: page number to read
805 * 805 *
806 * For BCH syndrome calculation and error correction using ELM module. 806 * For BCH syndrome calculation and error correction using ELM module.
807 * Syndrome calculation is surpressed for reading of non page aligned length 807 * Syndrome calculation is surpressed for reading of non page aligned length
808 */ 808 */
static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	/* Land the ECC bytes at their layout offset inside oob_poi, not at
	 * offset 0: the ecc_code[] gather below indexes oob_poi through
	 * eccpos[], so a mismatch here made erased pages look like they
	 * carried non-0xFF ECC and fail as uncorrectable. */
	uint8_t *oob = &chip->oob_poi[eccpos[0]];
	uint32_t data_pos;
	uint32_t oob_pos;

	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	data_pos = 0;
	/* oob area start */
	oob_pos = (eccsize * eccsteps) + chip->ecc.layout->eccpos[0];

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize,
				oob += eccbytes) {
		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		/* read data */
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_pos, page);
		chip->read_buf(mtd, p, eccsize);

		/* read respective ecc from oob area */
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, page);

		/* BCH8 stores a 13-byte ECC payload per step while oob and
		 * oob_pos still advance by eccbytes — presumably 14 for
		 * BCH8, leaving one spare byte per step; confirm against
		 * the ecc layout set up in probe. */
		if (info->ecc_opt == OMAP_ECC_BCH8_CODE_HW)
			chip->read_buf(mtd, oob, 13);
		else
			chip->read_buf(mtd, oob, eccbytes);
		/* read syndrome */
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);

		data_pos += eccsize;
		oob_pos += eccbytes;
	}

	/* Gather the stored ECC bytes in layout order for correction. */
	for (i = 0; i < chip->ecc.total; i++)
		ecc_code[i] = chip->oob_poi[eccpos[i]];

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Run error correction per step and fold the result into the
	 * global MTD ECC statistics. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);

		if (stat < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += stat;
	}
	return 0;
}
869 868
870 /** 869 /**
871 * omap_correct_data - Compares the ECC read with HW generated ECC 870 * omap_correct_data - Compares the ECC read with HW generated ECC
872 * @mtd: MTD device structure 871 * @mtd: MTD device structure
873 * @dat: page data 872 * @dat: page data
874 * @read_ecc: ecc read from nand flash 873 * @read_ecc: ecc read from nand flash
875 * @calc_ecc: ecc read from HW ECC registers 874 * @calc_ecc: ecc read from HW ECC registers
876 * 875 *
877 * Compares the ecc read from nand spare area with ECC registers values 876 * Compares the ecc read from nand spare area with ECC registers values
878 * and if ECC's mismatched, it will call 'omap_compare_ecc' for error 877 * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
879 * detection and correction. If there are no errors, %0 is returned. If 878 * detection and correction. If there are no errors, %0 is returned. If
880 * there were errors and all of the errors were corrected, the number of 879 * there were errors and all of the errors were corrected, the number of
881 * corrected errors is returned. If uncorrectable errors exist, %-1 is 880 * corrected errors is returned. If uncorrectable errors exist, %-1 is
882 * returned. 881 * returned.
883 */ 882 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;
	int stat = 0;
	int j, eccsize, eccflag, count;
	unsigned int err_loc[8];

	/* Ex NAND_ECC_HW12_2048 */
	/* Each correction block covers 512 bytes of data. */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	switch (info->ecc_opt) {
	case OMAP_ECC_HAMMING_CODE_HW:
	case OMAP_ECC_HAMMING_CODE_HW_ROMCODE:
		/* 3 ECC bytes per 512-byte block; only fall into the
		 * (destructive) compare path when they actually differ. */
		for (i = 0; i < blockCnt; i++) {
			if (memcmp(read_ecc, calc_ecc, 3) != 0) {
				ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
				if (ret < 0)
					return ret;

				/* keep track of number of corrected errors */
				stat += ret;
			}
			read_ecc += 3;
			calc_ecc += 3;
			dat += 512;
		}
		break;

	case OMAP_ECC_BCH4_CODE_HW:
		/* 7 syndrome bytes per 512-byte block. */
		eccsize = 7;
		gpmc_calculate_ecc(info->ecc_opt, info->gpmc_cs, dat, calc_ecc);
		for (i = 0; i < blockCnt; i++) {
			/* check if any ecc error */
			eccflag = 0;
			for (j = 0; (j < eccsize) && (eccflag == 0); j++)
				if (calc_ecc[j] != 0)
					eccflag = 1;

			/* Non-zero syndrome plus all-0xFF stored ECC means
			 * an erased block: skip decoding in that case. */
			if (eccflag == 1) {
				eccflag = 0;
				for (j = 0; (j < eccsize) &&
						(eccflag == 0); j++)
					if (read_ecc[j] != 0xFF)
						eccflag = 1;
			}

			count = 0;
			if (eccflag == 1)
				count = decode_bch(0, calc_ecc, err_loc);

			/* Flip each reported error bit that falls inside
			 * the 4096-bit (512-byte) data area. */
			for (j = 0; j < count; j++) {
				if (err_loc[j] < 4096)
					dat[err_loc[j] >> 3] ^=
							1 << (err_loc[j] & 7);
				/* else, not interested to correct ecc */
			}

			stat += count;
			calc_ecc = calc_ecc + eccsize;
			read_ecc = read_ecc + eccsize;
			dat += 512;
		}
		break;
	case OMAP_ECC_BCH8_CODE_HW:
		eccsize = BCH8_ECC_OOB_BYTES;

		for (i = 0; i < blockCnt; i++) {
			eccflag = 0;
			/* check if area is flashed */
			/* All-0xFF stored ECC means an erased block: no
			 * decoding needed (this is the erased-page path). */
			for (j = 0; (j < eccsize) && (eccflag == 0); j++)
				if (read_ecc[j] != 0xFF)
					eccflag = 1;

			if (eccflag == 1) {
				eccflag = 0;
				/* check if any ecc error */
				for (j = 0; (j < eccsize) && (eccflag == 0);
						j++)
					if (calc_ecc[j] != 0)
						eccflag = 1;
			}

			count = 0;
			if (eccflag == 1)
				count = elm_decode_bch_error(0, calc_ecc,
						err_loc);

			for (j = 0; j < count; j++) {
				u32 bit_pos, byte_pos;

				/* ELM reports bit positions counted from the
				 * end of the codeword, hence the reversed
				 * byte_pos computation. */
				bit_pos   = err_loc[j] % 8;
				byte_pos  = (BCH8_ECC_MAX - err_loc[j] - 1) / 8;
				if (err_loc[j] < BCH8_ECC_MAX)
					dat[byte_pos] ^=
							1 << bit_pos;
				/* else, not interested to correct ecc */
			}

			stat += count;
			/* NOTE(review): the per-block stride here is a
			 * hard-coded 14 bytes rather than eccsize
			 * (BCH8_ECC_OOB_BYTES) — presumably 13 ECC bytes
			 * plus one spare, matching the read-path layout;
			 * confirm against the BCH8 ecc layout. */
			calc_ecc = calc_ecc + 14;
			read_ecc = read_ecc + 14;
			dat += BCH8_ECC_BYTES;
		}
		break;
	}
	return stat;
}
998 997
999 /** 998 /**
1000 * omap_calcuate_ecc - Generate non-inverted ECC bytes. 999 * omap_calcuate_ecc - Generate non-inverted ECC bytes.
1001 * @mtd: MTD device structure 1000 * @mtd: MTD device structure
1002 * @dat: The pointer to data on which ecc is computed 1001 * @dat: The pointer to data on which ecc is computed
1003 * @ecc_code: The ecc_code buffer 1002 * @ecc_code: The ecc_code buffer
1004 * 1003 *
1005 * Using noninverted ECC can be considered ugly since writing a blank 1004 * Using noninverted ECC can be considered ugly since writing a blank
1006 * page ie. padding will clear the ECC bytes. This is no problem as long 1005 * page ie. padding will clear the ECC bytes. This is no problem as long
1007 * nobody is trying to write data on the seemingly unused page. Reading 1006 * nobody is trying to write data on the seemingly unused page. Reading
1008 * an erased page will produce an ECC mismatch between generated and read 1007 * an erased page will produce an ECC mismatch between generated and read
1009 * ECC bytes that has to be dealt with separately. 1008 * ECC bytes that has to be dealt with separately.
1010 */ 1009 */
1011 static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat, 1010 static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
1012 u_char *ecc_code) 1011 u_char *ecc_code)
1013 { 1012 {
1014 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1013 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1015 mtd); 1014 mtd);
1016 return gpmc_calculate_ecc(info->ecc_opt, info->gpmc_cs, dat, ecc_code); 1015 return gpmc_calculate_ecc(info->ecc_opt, info->gpmc_cs, dat, ecc_code);
1017 } 1016 }
1018 1017
1019 /** 1018 /**
1020 * omap_enable_hwecc - This function enables the hardware ecc functionality 1019 * omap_enable_hwecc - This function enables the hardware ecc functionality
1021 * @mtd: MTD device structure 1020 * @mtd: MTD device structure
1022 * @mode: Read/Write mode 1021 * @mode: Read/Write mode
1023 */ 1022 */
1024 static void omap_enable_hwecc(struct mtd_info *mtd, int mode) 1023 static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
1025 { 1024 {
1026 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1025 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1027 mtd); 1026 mtd);
1028 struct nand_chip *chip = mtd->priv; 1027 struct nand_chip *chip = mtd->priv;
1029 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0; 1028 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
1030 1029
1031 gpmc_enable_hwecc(info->ecc_opt, info->gpmc_cs, mode, 1030 gpmc_enable_hwecc(info->ecc_opt, info->gpmc_cs, mode,
1032 dev_width, info->nand.ecc.size); 1031 dev_width, info->nand.ecc.size);
1033 } 1032 }
1034 1033
1035 /** 1034 /**
1036 * omap_wait - wait until the command is done 1035 * omap_wait - wait until the command is done
1037 * @mtd: MTD device structure 1036 * @mtd: MTD device structure
1038 * @chip: NAND Chip structure 1037 * @chip: NAND Chip structure
1039 * 1038 *
1040 * Wait function is called during Program and erase operations and 1039 * Wait function is called during Program and erase operations and
1041 * the way it is called from MTD layer, we should wait till the NAND 1040 * the way it is called from MTD layer, we should wait till the NAND
1042 * chip is ready after the programming/erase operation has completed. 1041 * chip is ready after the programming/erase operation has completed.
1043 * 1042 *
1044 * Erase can take up to 400ms and program up to 20ms according to 1043 * Erase can take up to 400ms and program up to 20ms according to
1045 * general NAND and SmartMedia specs 1044 * general NAND and SmartMedia specs
1046 */ 1045 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	/* Timeout budget per general NAND/SmartMedia specs (see the
	 * kerneldoc above): erase up to 400ms, program up to 20ms. */
	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	/* Issue READ STATUS once, then poll the data register until the
	 * chip reports ready or the deadline passes. */
	gpmc_nand_write(info->gpmc_cs,
			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
	while (time_before(jiffies, timeo)) {
		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}
	/* On timeout this returns the last status read — or
	 * NAND_STATUS_FAIL if the deadline had already passed. */
	return status;
}
1070 1069
1071 /** 1070 /**
1072 * omap_dev_ready - calls the platform specific dev_ready function 1071 * omap_dev_ready - calls the platform specific dev_ready function
1073 * @mtd: MTD device structure 1072 * @mtd: MTD device structure
1074 */ 1073 */
1075 static int omap_dev_ready(struct mtd_info *mtd) 1074 static int omap_dev_ready(struct mtd_info *mtd)
1076 { 1075 {
1077 unsigned int val = 0; 1076 unsigned int val = 0;
1078 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1077 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1079 mtd); 1078 mtd);
1080 1079
1081 val = gpmc_read_status(GPMC_GET_IRQ_STATUS); 1080 val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
1082 if ((val & 0x100) == 0x100) { 1081 if ((val & 0x100) == 0x100) {
1083 /* Clear IRQ Interrupt */ 1082 /* Clear IRQ Interrupt */
1084 val |= 0x100; 1083 val |= 0x100;
1085 val &= ~(0x0); 1084 val &= ~(0x0);
1086 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val); 1085 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
1087 } else { 1086 } else {
1088 unsigned int cnt = 0; 1087 unsigned int cnt = 0;
1089 while (cnt++ < 0x1FF) { 1088 while (cnt++ < 0x1FF) {
1090 if ((val & 0x100) == 0x100) 1089 if ((val & 0x100) == 0x100)
1091 return 0; 1090 return 0;
1092 val = gpmc_read_status(GPMC_GET_IRQ_STATUS); 1091 val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
1093 } 1092 }
1094 } 1093 }
1095 1094
1096 return 1; 1095 return 1;
1097 } 1096 }
1098 1097
1099 static int __devinit omap_nand_probe(struct platform_device *pdev) 1098 static int __devinit omap_nand_probe(struct platform_device *pdev)
1100 { 1099 {
1101 struct omap_nand_info *info; 1100 struct omap_nand_info *info;
1102 struct omap_nand_platform_data *pdata; 1101 struct omap_nand_platform_data *pdata;
1103 int err; 1102 int err;
1104 int i, offset; 1103 int i, offset;
1105 1104
1106 pdata = pdev->dev.platform_data; 1105 pdata = pdev->dev.platform_data;
1107 if (pdata == NULL) { 1106 if (pdata == NULL) {
1108 dev_err(&pdev->dev, "platform data missing\n"); 1107 dev_err(&pdev->dev, "platform data missing\n");
1109 return -ENODEV; 1108 return -ENODEV;
1110 } 1109 }
1111 1110
1112 info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL); 1111 info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
1113 if (!info) 1112 if (!info)
1114 return -ENOMEM; 1113 return -ENOMEM;
1115 1114
1116 platform_set_drvdata(pdev, info); 1115 platform_set_drvdata(pdev, info);
1117 1116
1118 spin_lock_init(&info->controller.lock); 1117 spin_lock_init(&info->controller.lock);
1119 init_waitqueue_head(&info->controller.wq); 1118 init_waitqueue_head(&info->controller.wq);
1120 1119
1121 info->pdev = pdev; 1120 info->pdev = pdev;
1122 1121
1123 info->gpmc_cs = pdata->cs; 1122 info->gpmc_cs = pdata->cs;
1124 info->phys_base = pdata->phys_base; 1123 info->phys_base = pdata->phys_base;
1125 1124
1126 info->mtd.priv = &info->nand; 1125 info->mtd.priv = &info->nand;
1127 info->mtd.name = dev_name(&pdev->dev); 1126 info->mtd.name = dev_name(&pdev->dev);
1128 info->mtd.owner = THIS_MODULE; 1127 info->mtd.owner = THIS_MODULE;
1129 info->ecc_opt = pdata->ecc_opt; 1128 info->ecc_opt = pdata->ecc_opt;
1130 1129
1131 info->nand.options = pdata->devsize; 1130 info->nand.options = pdata->devsize;
1132 info->nand.options |= NAND_SKIP_BBTSCAN; 1131 info->nand.options |= NAND_SKIP_BBTSCAN;
1133 1132
1134 /* NAND write protect off */ 1133 /* NAND write protect off */
1135 gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0); 1134 gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
1136 1135
1137 if (!request_mem_region(info->phys_base, NAND_IO_SIZE, 1136 if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
1138 pdev->dev.driver->name)) { 1137 pdev->dev.driver->name)) {
1139 err = -EBUSY; 1138 err = -EBUSY;
1140 goto out_free_info; 1139 goto out_free_info;
1141 } 1140 }
1142 1141
1143 info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE); 1142 info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
1144 if (!info->nand.IO_ADDR_R) { 1143 if (!info->nand.IO_ADDR_R) {
1145 err = -ENOMEM; 1144 err = -ENOMEM;
1146 goto out_release_mem_region; 1145 goto out_release_mem_region;
1147 } 1146 }
1148 1147
1149 info->nand.controller = &info->controller; 1148 info->nand.controller = &info->controller;
1150 1149
1151 info->nand.IO_ADDR_W = info->nand.IO_ADDR_R; 1150 info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
1152 info->nand.cmd_ctrl = omap_hwcontrol; 1151 info->nand.cmd_ctrl = omap_hwcontrol;
1153 1152
1154 /* 1153 /*
1155 * If RDY/BSY line is connected to OMAP then use the omap ready 1154 * If RDY/BSY line is connected to OMAP then use the omap ready
1156 * funcrtion and the generic nand_wait function which reads the status 1155 * funcrtion and the generic nand_wait function which reads the status
1157 * register after monitoring the RDY/BSY line.Otherwise use a standard 1156 * register after monitoring the RDY/BSY line.Otherwise use a standard
1158 * chip delay which is slightly more than tR (AC Timing) of the NAND 1157 * chip delay which is slightly more than tR (AC Timing) of the NAND
1159 * device and read status register until you get a failure or success 1158 * device and read status register until you get a failure or success
1160 */ 1159 */
1161 if (pdata->dev_ready) { 1160 if (pdata->dev_ready) {
1162 info->nand.dev_ready = omap_dev_ready; 1161 info->nand.dev_ready = omap_dev_ready;
1163 info->nand.chip_delay = 0; 1162 info->nand.chip_delay = 0;
1164 } else { 1163 } else {
1165 info->nand.waitfunc = omap_wait; 1164 info->nand.waitfunc = omap_wait;
1166 info->nand.chip_delay = 50; 1165 info->nand.chip_delay = 50;
1167 } 1166 }
1168 switch (pdata->xfer_type) { 1167 switch (pdata->xfer_type) {
1169 case NAND_OMAP_PREFETCH_POLLED: 1168 case NAND_OMAP_PREFETCH_POLLED:
1170 info->nand.read_buf = omap_read_buf_pref; 1169 info->nand.read_buf = omap_read_buf_pref;
1171 info->nand.write_buf = omap_write_buf_pref; 1170 info->nand.write_buf = omap_write_buf_pref;
1172 break; 1171 break;
1173 1172
1174 case NAND_OMAP_POLLED: 1173 case NAND_OMAP_POLLED:
1175 if (info->nand.options & NAND_BUSWIDTH_16) { 1174 if (info->nand.options & NAND_BUSWIDTH_16) {
1176 info->nand.read_buf = omap_read_buf16; 1175 info->nand.read_buf = omap_read_buf16;
1177 info->nand.write_buf = omap_write_buf16; 1176 info->nand.write_buf = omap_write_buf16;
1178 } else { 1177 } else {
1179 info->nand.read_buf = omap_read_buf8; 1178 info->nand.read_buf = omap_read_buf8;
1180 info->nand.write_buf = omap_write_buf8; 1179 info->nand.write_buf = omap_write_buf8;
1181 } 1180 }
1182 break; 1181 break;
1183 1182
1184 case NAND_OMAP_PREFETCH_DMA: 1183 case NAND_OMAP_PREFETCH_DMA:
1185 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", 1184 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
1186 omap_nand_dma_cb, &info->comp, &info->dma_ch); 1185 omap_nand_dma_cb, &info->comp, &info->dma_ch);
1187 if (err < 0) { 1186 if (err < 0) {
1188 info->dma_ch = -1; 1187 info->dma_ch = -1;
1189 dev_err(&pdev->dev, "DMA request failed!\n"); 1188 dev_err(&pdev->dev, "DMA request failed!\n");
1190 goto out_release_mem_region; 1189 goto out_release_mem_region;
1191 } else { 1190 } else {
1192 omap_set_dma_dest_burst_mode(info->dma_ch, 1191 omap_set_dma_dest_burst_mode(info->dma_ch,
1193 OMAP_DMA_DATA_BURST_16); 1192 OMAP_DMA_DATA_BURST_16);
1194 omap_set_dma_src_burst_mode(info->dma_ch, 1193 omap_set_dma_src_burst_mode(info->dma_ch,
1195 OMAP_DMA_DATA_BURST_16); 1194 OMAP_DMA_DATA_BURST_16);
1196 1195
1197 info->nand.read_buf = omap_read_buf_dma_pref; 1196 info->nand.read_buf = omap_read_buf_dma_pref;
1198 info->nand.write_buf = omap_write_buf_dma_pref; 1197 info->nand.write_buf = omap_write_buf_dma_pref;
1199 } 1198 }
1200 break; 1199 break;
1201 1200
1202 case NAND_OMAP_PREFETCH_IRQ: 1201 case NAND_OMAP_PREFETCH_IRQ:
1203 err = request_irq(pdata->gpmc_irq, 1202 err = request_irq(pdata->gpmc_irq,
1204 omap_nand_irq, IRQF_SHARED, "gpmc-nand", info); 1203 omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
1205 if (err) { 1204 if (err) {
1206 dev_err(&pdev->dev, "requesting irq(%d) error:%d", 1205 dev_err(&pdev->dev, "requesting irq(%d) error:%d",
1207 pdata->gpmc_irq, err); 1206 pdata->gpmc_irq, err);
1208 goto out_release_mem_region; 1207 goto out_release_mem_region;
1209 } else { 1208 } else {
1210 info->gpmc_irq = pdata->gpmc_irq; 1209 info->gpmc_irq = pdata->gpmc_irq;
1211 info->nand.read_buf = omap_read_buf_irq_pref; 1210 info->nand.read_buf = omap_read_buf_irq_pref;
1212 info->nand.write_buf = omap_write_buf_irq_pref; 1211 info->nand.write_buf = omap_write_buf_irq_pref;
1213 } 1212 }
1214 break; 1213 break;
1215 1214
1216 default: 1215 default:
1217 dev_err(&pdev->dev, 1216 dev_err(&pdev->dev,
1218 "xfer_type(%d) not supported!\n", pdata->xfer_type); 1217 "xfer_type(%d) not supported!\n", pdata->xfer_type);
1219 err = -EINVAL; 1218 err = -EINVAL;
1220 goto out_release_mem_region; 1219 goto out_release_mem_region;
1221 } 1220 }
1222 1221
1223 info->nand.verify_buf = omap_verify_buf; 1222 info->nand.verify_buf = omap_verify_buf;
1224 1223
1225 /* selsect the ecc type */ 1224 /* selsect the ecc type */
1226 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT) 1225 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
1227 info->nand.ecc.mode = NAND_ECC_SOFT; 1226 info->nand.ecc.mode = NAND_ECC_SOFT;
1228 else { 1227 else {
1229 if (pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) { 1228 if (pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) {
1230 info->nand.ecc.bytes = 4*7; 1229 info->nand.ecc.bytes = 4*7;
1231 info->nand.ecc.size = 4*512; 1230 info->nand.ecc.size = 4*512;
1232 } else if (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW) { 1231 } else if (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW) {
1233 info->nand.ecc.bytes = 14; 1232 info->nand.ecc.bytes = 14;
1234 info->nand.ecc.size = 512; 1233 info->nand.ecc.size = 512;
1235 info->nand.ecc.read_page = omap_read_page_bch; 1234 info->nand.ecc.read_page = omap_read_page_bch;
1236 } else { 1235 } else {
1237 info->nand.ecc.bytes = 3; 1236 info->nand.ecc.bytes = 3;
1238 info->nand.ecc.size = 512; 1237 info->nand.ecc.size = 512;
1239 } 1238 }
1240 info->nand.ecc.calculate = omap_calculate_ecc; 1239 info->nand.ecc.calculate = omap_calculate_ecc;
1241 info->nand.ecc.hwctl = omap_enable_hwecc; 1240 info->nand.ecc.hwctl = omap_enable_hwecc;
1242 info->nand.ecc.correct = omap_correct_data; 1241 info->nand.ecc.correct = omap_correct_data;
1243 info->nand.ecc.mode = NAND_ECC_HW; 1242 info->nand.ecc.mode = NAND_ECC_HW;
1244 } 1243 }
1245 1244
1246 /* DIP switches on some boards change between 8 and 16 bit 1245 /* DIP switches on some boards change between 8 and 16 bit
1247 * bus widths for flash. Try the other width if the first try fails. 1246 * bus widths for flash. Try the other width if the first try fails.
1248 */ 1247 */
1249 if (nand_scan_ident(&info->mtd, 1, NULL)) { 1248 if (nand_scan_ident(&info->mtd, 1, NULL)) {
1250 info->nand.options ^= NAND_BUSWIDTH_16; 1249 info->nand.options ^= NAND_BUSWIDTH_16;
1251 if (nand_scan_ident(&info->mtd, 1, NULL)) { 1250 if (nand_scan_ident(&info->mtd, 1, NULL)) {
1252 err = -ENXIO; 1251 err = -ENXIO;
1253 goto out_release_mem_region; 1252 goto out_release_mem_region;
1254 } 1253 }
1255 } 1254 }
1256 1255
1257 /* select ecc lyout */ 1256 /* select ecc lyout */
1258 if (info->nand.ecc.mode != NAND_ECC_SOFT) { 1257 if (info->nand.ecc.mode != NAND_ECC_SOFT) {
1259 1258
1260 if (info->nand.options & NAND_BUSWIDTH_16) 1259 if (info->nand.options & NAND_BUSWIDTH_16)
1261 offset = JFFS2_CLEAN_MARKER_OFFSET; 1260 offset = JFFS2_CLEAN_MARKER_OFFSET;
1262 else { 1261 else {
1263 offset = JFFS2_CLEAN_MARKER_OFFSET; 1262 offset = JFFS2_CLEAN_MARKER_OFFSET;
1264 info->nand.badblock_pattern = &bb_descrip_flashbased; 1263 info->nand.badblock_pattern = &bb_descrip_flashbased;
1265 } 1264 }
1266 1265
1267 if (info->mtd.oobsize == 64) 1266 if (info->mtd.oobsize == 64)
1268 omap_oobinfo.eccbytes = info->nand.ecc.bytes * 1267 omap_oobinfo.eccbytes = info->nand.ecc.bytes *
1269 2048/info->nand.ecc.size; 1268 2048/info->nand.ecc.size;
1270 else 1269 else
1271 omap_oobinfo.eccbytes = info->nand.ecc.bytes; 1270 omap_oobinfo.eccbytes = info->nand.ecc.bytes;
1272 1271
1273 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) { 1272 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
1274 omap_oobinfo.oobfree->offset = 1273 omap_oobinfo.oobfree->offset =
1275 offset + omap_oobinfo.eccbytes; 1274 offset + omap_oobinfo.eccbytes;
1276 omap_oobinfo.oobfree->length = info->mtd.oobsize - 1275 omap_oobinfo.oobfree->length = info->mtd.oobsize -
1277 (offset + omap_oobinfo.eccbytes); 1276 (offset + omap_oobinfo.eccbytes);
1278 } else if (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW) { 1277 } else if (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW) {
1279 offset = BCH_ECC_POS; /* Synchronize with U-boot */ 1278 offset = BCH_ECC_POS; /* Synchronize with U-boot */
1280 omap_oobinfo.oobfree->offset = 1279 omap_oobinfo.oobfree->offset =
1281 BCH_JFFS2_CLEAN_MARKER_OFFSET; 1280 BCH_JFFS2_CLEAN_MARKER_OFFSET;
1282 omap_oobinfo.oobfree->length = info->mtd.oobsize - 1281 omap_oobinfo.oobfree->length = info->mtd.oobsize -
1283 offset - omap_oobinfo.eccbytes; 1282 offset - omap_oobinfo.eccbytes;
1284 } else { 1283 } else {
1285 omap_oobinfo.oobfree->offset = offset; 1284 omap_oobinfo.oobfree->offset = offset;
1286 omap_oobinfo.oobfree->length = info->mtd.oobsize - 1285 omap_oobinfo.oobfree->length = info->mtd.oobsize -
1287 offset - omap_oobinfo.eccbytes; 1286 offset - omap_oobinfo.eccbytes;
1288 /* 1287 /*
1289 offset is calculated considering the following : 1288 offset is calculated considering the following :
1290 1) 12 bytes ECC for 512 byte access and 24 bytes ECC for 1289 1) 12 bytes ECC for 512 byte access and 24 bytes ECC for
1291 256 byte access in OOB_64 can be supported 1290 256 byte access in OOB_64 can be supported
1292 2)Ecc bytes lie to the end of OOB area. 1291 2)Ecc bytes lie to the end of OOB area.
1293 3)Ecc layout must match with u-boot's ECC layout. 1292 3)Ecc layout must match with u-boot's ECC layout.
1294 */ 1293 */
1295 offset = info->mtd.oobsize - MAX_HWECC_BYTES_OOB_64; 1294 offset = info->mtd.oobsize - MAX_HWECC_BYTES_OOB_64;
1296 } 1295 }
1297 1296
1298 for (i = 0; i < omap_oobinfo.eccbytes; i++) 1297 for (i = 0; i < omap_oobinfo.eccbytes; i++)
1299 omap_oobinfo.eccpos[i] = i+offset; 1298 omap_oobinfo.eccpos[i] = i+offset;
1300 1299
1301 info->nand.ecc.layout = &omap_oobinfo; 1300 info->nand.ecc.layout = &omap_oobinfo;
1302 } 1301 }
1303 1302
1304 /* second phase scan */ 1303 /* second phase scan */
1305 if (nand_scan_tail(&info->mtd)) { 1304 if (nand_scan_tail(&info->mtd)) {
1306 err = -ENXIO; 1305 err = -ENXIO;
1307 goto out_release_mem_region; 1306 goto out_release_mem_region;
1308 } 1307 }
1309 1308
1310 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); 1309 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
1311 if (err > 0) 1310 if (err > 0)
1312 mtd_device_register(&info->mtd, info->parts, err); 1311 mtd_device_register(&info->mtd, info->parts, err);
1313 else if (pdata->parts) 1312 else if (pdata->parts)
1314 mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts); 1313 mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
1315 else 1314 else
1316 mtd_device_register(&info->mtd, NULL, 0); 1315 mtd_device_register(&info->mtd, NULL, 0);
1317 1316
1318 platform_set_drvdata(pdev, &info->mtd); 1317 platform_set_drvdata(pdev, &info->mtd);
1319 1318
1320 return 0; 1319 return 0;
1321 1320
1322 out_release_mem_region: 1321 out_release_mem_region:
1323 release_mem_region(info->phys_base, NAND_IO_SIZE); 1322 release_mem_region(info->phys_base, NAND_IO_SIZE);
1324 out_free_info: 1323 out_free_info:
1325 kfree(info); 1324 kfree(info);
1326 1325
1327 return err; 1326 return err;
1328 } 1327 }
1329 1328
1330 static int omap_nand_remove(struct platform_device *pdev) 1329 static int omap_nand_remove(struct platform_device *pdev)
1331 { 1330 {
1332 struct mtd_info *mtd = platform_get_drvdata(pdev); 1331 struct mtd_info *mtd = platform_get_drvdata(pdev);
1333 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1332 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1334 mtd); 1333 mtd);
1335 1334
1336 platform_set_drvdata(pdev, NULL); 1335 platform_set_drvdata(pdev, NULL);
1337 if (info->dma_ch != -1) 1336 if (info->dma_ch != -1)
1338 omap_free_dma(info->dma_ch); 1337 omap_free_dma(info->dma_ch);
1339 1338
1340 if (info->gpmc_irq) 1339 if (info->gpmc_irq)
1341 free_irq(info->gpmc_irq, info); 1340 free_irq(info->gpmc_irq, info);
1342 1341
1343 /* Release NAND device, its internal structures and partitions */ 1342 /* Release NAND device, its internal structures and partitions */
1344 nand_release(&info->mtd); 1343 nand_release(&info->mtd);
1345 iounmap(info->nand.IO_ADDR_R); 1344 iounmap(info->nand.IO_ADDR_R);
1346 release_mem_region(info->phys_base, NAND_IO_SIZE); 1345 release_mem_region(info->phys_base, NAND_IO_SIZE);
1347 kfree(&info->mtd); 1346 kfree(&info->mtd);
1348 return 0; 1347 return 0;
1349 } 1348 }
1350 1349
1351 static struct platform_driver omap_nand_driver = { 1350 static struct platform_driver omap_nand_driver = {
1352 .probe = omap_nand_probe, 1351 .probe = omap_nand_probe,
1353 .remove = omap_nand_remove, 1352 .remove = omap_nand_remove,
1354 .driver = { 1353 .driver = {
1355 .name = DRIVER_NAME, 1354 .name = DRIVER_NAME,
1356 .owner = THIS_MODULE, 1355 .owner = THIS_MODULE,
1357 }, 1356 },
1358 }; 1357 };
1359 1358
1360 static int __init omap_nand_init(void) 1359 static int __init omap_nand_init(void)
1361 { 1360 {
1362 pr_info("%s driver initializing\n", DRIVER_NAME); 1361 pr_info("%s driver initializing\n", DRIVER_NAME);
1363 1362
1364 return platform_driver_register(&omap_nand_driver); 1363 return platform_driver_register(&omap_nand_driver);
1365 } 1364 }
1366 1365
/**
 * omap_nand_exit - module exit; unregister the platform driver
 */
static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}
1371 1370
module_init(omap_nand_init);
module_exit(omap_nand_exit);

/* Module metadata; the platform alias lets udev autoload on device match. */
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");