Commit 05d90d89f97e040f2e950478b0015744e8fb0011

Authored by Philip, Avinash
1 parent 9983506323
Exists in master

arm:omap:nand - Remove unaligned size check

This patch removes the check for unaligned size on reads from NAND
flash. The size check had previously been added in order to avoid error
correction handling of read lengths that are not aligned to the page size.

Signed-off-by: Philip, Avinash <avinashphilip@ti.com>

Showing 1 changed file with 6 additions and 9 deletions Inline Diff

drivers/mtd/nand/omap2.c
1 /* 1 /*
2 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com> 2 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
3 * Copyright © 2004 Micron Technology Inc. 3 * Copyright © 2004 Micron Technology Inc.
4 * Copyright © 2004 David Brownell 4 * Copyright © 2004 David Brownell
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11 #include <linux/platform_device.h> 11 #include <linux/platform_device.h>
12 #include <linux/dma-mapping.h> 12 #include <linux/dma-mapping.h>
13 #include <linux/delay.h> 13 #include <linux/delay.h>
14 #include <linux/interrupt.h> 14 #include <linux/interrupt.h>
15 #include <linux/jiffies.h> 15 #include <linux/jiffies.h>
16 #include <linux/sched.h> 16 #include <linux/sched.h>
17 #include <linux/mtd/mtd.h> 17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/nand.h> 18 #include <linux/mtd/nand.h>
19 #include <linux/mtd/partitions.h> 19 #include <linux/mtd/partitions.h>
20 #include <linux/io.h> 20 #include <linux/io.h>
21 #include <linux/slab.h> 21 #include <linux/slab.h>
22 22
23 #include <plat/dma.h> 23 #include <plat/dma.h>
24 #include <plat/gpmc.h> 24 #include <plat/gpmc.h>
25 #include <plat/nand.h> 25 #include <plat/nand.h>
26 #include <plat/elm.h> 26 #include <plat/elm.h>
27 27
#define DRIVER_NAME "omap2-nand"
#define OMAP_NAND_TIMEOUT_MS 5000

/* BCH8 sizing: ECC span per sector plus the OOB bytes it protects */
#define BCH8_ECC_BYTES (512)
#define BCH8_ECC_OOB_BYTES (13)
#define BCH8_ECC_MAX ((BCH8_ECC_BYTES + BCH8_ECC_OOB_BYTES) * 8)

/* Even parity flags (bits 0..11) */
#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
#define NAND_Ecc_P4e (1 << 2)
#define NAND_Ecc_P8e (1 << 3)
#define NAND_Ecc_P16e (1 << 4)
#define NAND_Ecc_P32e (1 << 5)
#define NAND_Ecc_P64e (1 << 6)
#define NAND_Ecc_P128e (1 << 7)
#define NAND_Ecc_P256e (1 << 8)
#define NAND_Ecc_P512e (1 << 9)
#define NAND_Ecc_P1024e (1 << 10)
#define NAND_Ecc_P2048e (1 << 11)

/* Odd parity flags (bits 16..27) */
#define NAND_Ecc_P1o (1 << 16)
#define NAND_Ecc_P2o (1 << 17)
#define NAND_Ecc_P4o (1 << 18)
#define NAND_Ecc_P8o (1 << 19)
#define NAND_Ecc_P16o (1 << 20)
#define NAND_Ecc_P32o (1 << 21)
#define NAND_Ecc_P64o (1 << 22)
#define NAND_Ecc_P128o (1 << 23)
#define NAND_Ecc_P256o (1 << 24)
#define NAND_Ecc_P512o (1 << 25)
#define NAND_Ecc_P1024o (1 << 26)
#define NAND_Ecc_P2048o (1 << 27)

/* truth flag: 1 if the parity bit is set in 'value', else 0 */
#define TF(value) (value ? 1 : 0)

/* Pack parity flags into the per-byte ECC layout */
#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)

#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)

#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)

/* Alternate ("_s") packing used for the swapped byte ordering */
#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)

#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)

#define MAX_HWECC_BYTES_OOB_64 24
#define JFFS2_CLEAN_MARKER_OFFSET 0x2
#define BCH_ECC_POS 0x2
#define BCH_JFFS2_CLEAN_MARKER_OFFSET 0x3a
106 106
107 static const char *part_probes[] = { "cmdlinepart", NULL }; 107 static const char *part_probes[] = { "cmdlinepart", NULL };
108 108
109 int decode_bch(int select_4_8, unsigned char *ecc, unsigned int *err_loc); 109 int decode_bch(int select_4_8, unsigned char *ecc, unsigned int *err_loc);
110 110
111 /* oob info generated runtime depending on ecc algorithm and layout selected */ 111 /* oob info generated runtime depending on ecc algorithm and layout selected */
112 static struct nand_ecclayout omap_oobinfo; 112 static struct nand_ecclayout omap_oobinfo;
113 /* Define some generic bad / good block scan pattern which are used 113 /* Define some generic bad / good block scan pattern which are used
114 * while scanning a device for factory marked good / bad blocks 114 * while scanning a device for factory marked good / bad blocks
115 */ 115 */
116 static uint8_t scan_ff_pattern[] = { 0xff }; 116 static uint8_t scan_ff_pattern[] = { 0xff };
117 static struct nand_bbt_descr bb_descrip_flashbased = { 117 static struct nand_bbt_descr bb_descrip_flashbased = {
118 .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, 118 .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
119 .offs = 0, 119 .offs = 0,
120 .len = 1, 120 .len = 1,
121 .pattern = scan_ff_pattern, 121 .pattern = scan_ff_pattern,
122 }; 122 };
123 123
124 124
125 struct omap_nand_info { 125 struct omap_nand_info {
126 struct nand_hw_control controller; 126 struct nand_hw_control controller;
127 struct omap_nand_platform_data *pdata; 127 struct omap_nand_platform_data *pdata;
128 struct mtd_info mtd; 128 struct mtd_info mtd;
129 struct mtd_partition *parts; 129 struct mtd_partition *parts;
130 struct nand_chip nand; 130 struct nand_chip nand;
131 struct platform_device *pdev; 131 struct platform_device *pdev;
132 132
133 int gpmc_cs; 133 int gpmc_cs;
134 unsigned long phys_base; 134 unsigned long phys_base;
135 struct completion comp; 135 struct completion comp;
136 int dma_ch; 136 int dma_ch;
137 int gpmc_irq; 137 int gpmc_irq;
138 enum { 138 enum {
139 OMAP_NAND_IO_READ = 0, /* read */ 139 OMAP_NAND_IO_READ = 0, /* read */
140 OMAP_NAND_IO_WRITE, /* write */ 140 OMAP_NAND_IO_WRITE, /* write */
141 } iomode; 141 } iomode;
142 u_char *buf; 142 u_char *buf;
143 int buf_len; 143 int buf_len;
144 int ecc_opt; 144 int ecc_opt;
145 }; 145 };
146 146
147 /** 147 /**
148 * omap_hwcontrol - hardware specific access to control-lines 148 * omap_hwcontrol - hardware specific access to control-lines
149 * @mtd: MTD device structure 149 * @mtd: MTD device structure
150 * @cmd: command to device 150 * @cmd: command to device
151 * @ctrl: 151 * @ctrl:
152 * NAND_NCE: bit 0 -> don't care 152 * NAND_NCE: bit 0 -> don't care
153 * NAND_CLE: bit 1 -> Command Latch 153 * NAND_CLE: bit 1 -> Command Latch
154 * NAND_ALE: bit 2 -> Address Latch 154 * NAND_ALE: bit 2 -> Address Latch
155 * 155 *
156 * NOTE: boards may use different bits for these!! 156 * NOTE: boards may use different bits for these!!
157 */ 157 */
158 static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) 158 static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
159 { 159 {
160 struct omap_nand_info *info = container_of(mtd, 160 struct omap_nand_info *info = container_of(mtd,
161 struct omap_nand_info, mtd); 161 struct omap_nand_info, mtd);
162 162
163 if (cmd != NAND_CMD_NONE) { 163 if (cmd != NAND_CMD_NONE) {
164 if (ctrl & NAND_CLE) 164 if (ctrl & NAND_CLE)
165 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd); 165 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
166 166
167 else if (ctrl & NAND_ALE) 167 else if (ctrl & NAND_ALE)
168 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd); 168 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
169 169
170 else /* NAND_NCE */ 170 else /* NAND_NCE */
171 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd); 171 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
172 } 172 }
173 } 173 }
174 174
175 /** 175 /**
176 * omap_read_buf8 - read data from NAND controller into buffer 176 * omap_read_buf8 - read data from NAND controller into buffer
177 * @mtd: MTD device structure 177 * @mtd: MTD device structure
178 * @buf: buffer to store date 178 * @buf: buffer to store date
179 * @len: number of bytes to read 179 * @len: number of bytes to read
180 */ 180 */
181 static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len) 181 static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
182 { 182 {
183 struct nand_chip *nand = mtd->priv; 183 struct nand_chip *nand = mtd->priv;
184 184
185 ioread8_rep(nand->IO_ADDR_R, buf, len); 185 ioread8_rep(nand->IO_ADDR_R, buf, len);
186 } 186 }
187 187
188 /** 188 /**
189 * omap_write_buf8 - write buffer to NAND controller 189 * omap_write_buf8 - write buffer to NAND controller
190 * @mtd: MTD device structure 190 * @mtd: MTD device structure
191 * @buf: data buffer 191 * @buf: data buffer
192 * @len: number of bytes to write 192 * @len: number of bytes to write
193 */ 193 */
194 static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len) 194 static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
195 { 195 {
196 struct omap_nand_info *info = container_of(mtd, 196 struct omap_nand_info *info = container_of(mtd,
197 struct omap_nand_info, mtd); 197 struct omap_nand_info, mtd);
198 u_char *p = (u_char *)buf; 198 u_char *p = (u_char *)buf;
199 u32 status = 0; 199 u32 status = 0;
200 200
201 while (len--) { 201 while (len--) {
202 iowrite8(*p++, info->nand.IO_ADDR_W); 202 iowrite8(*p++, info->nand.IO_ADDR_W);
203 /* wait until buffer is available for write */ 203 /* wait until buffer is available for write */
204 do { 204 do {
205 status = gpmc_read_status(GPMC_STATUS_BUFFER); 205 status = gpmc_read_status(GPMC_STATUS_BUFFER);
206 } while (!status); 206 } while (!status);
207 } 207 }
208 } 208 }
209 209
210 /** 210 /**
211 * omap_read_buf16 - read data from NAND controller into buffer 211 * omap_read_buf16 - read data from NAND controller into buffer
212 * @mtd: MTD device structure 212 * @mtd: MTD device structure
213 * @buf: buffer to store date 213 * @buf: buffer to store date
214 * @len: number of bytes to read 214 * @len: number of bytes to read
215 */ 215 */
216 static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len) 216 static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
217 { 217 {
218 struct nand_chip *nand = mtd->priv; 218 struct nand_chip *nand = mtd->priv;
219 219
220 ioread16_rep(nand->IO_ADDR_R, buf, len / 2); 220 ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
221 } 221 }
222 222
223 /** 223 /**
224 * omap_write_buf16 - write buffer to NAND controller 224 * omap_write_buf16 - write buffer to NAND controller
225 * @mtd: MTD device structure 225 * @mtd: MTD device structure
226 * @buf: data buffer 226 * @buf: data buffer
227 * @len: number of bytes to write 227 * @len: number of bytes to write
228 */ 228 */
229 static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len) 229 static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
230 { 230 {
231 struct omap_nand_info *info = container_of(mtd, 231 struct omap_nand_info *info = container_of(mtd,
232 struct omap_nand_info, mtd); 232 struct omap_nand_info, mtd);
233 u16 *p = (u16 *) buf; 233 u16 *p = (u16 *) buf;
234 u32 status = 0; 234 u32 status = 0;
235 /* FIXME try bursts of writesw() or DMA ... */ 235 /* FIXME try bursts of writesw() or DMA ... */
236 len >>= 1; 236 len >>= 1;
237 237
238 while (len--) { 238 while (len--) {
239 iowrite16(*p++, info->nand.IO_ADDR_W); 239 iowrite16(*p++, info->nand.IO_ADDR_W);
240 /* wait until buffer is available for write */ 240 /* wait until buffer is available for write */
241 do { 241 do {
242 status = gpmc_read_status(GPMC_STATUS_BUFFER); 242 status = gpmc_read_status(GPMC_STATUS_BUFFER);
243 } while (!status); 243 } while (!status);
244 } 244 }
245 } 245 }
246 246
247 /** 247 /**
248 * omap_read_buf_pref - read data from NAND controller into buffer 248 * omap_read_buf_pref - read data from NAND controller into buffer
249 * @mtd: MTD device structure 249 * @mtd: MTD device structure
250 * @buf: buffer to store date 250 * @buf: buffer to store date
251 * @len: number of bytes to read 251 * @len: number of bytes to read
252 */ 252 */
253 static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len) 253 static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
254 { 254 {
255 struct omap_nand_info *info = container_of(mtd, 255 struct omap_nand_info *info = container_of(mtd,
256 struct omap_nand_info, mtd); 256 struct omap_nand_info, mtd);
257 uint32_t r_count = 0; 257 uint32_t r_count = 0;
258 int ret = 0; 258 int ret = 0;
259 u32 *p = (u32 *)buf; 259 u32 *p = (u32 *)buf;
260 260
261 /* take care of subpage reads */ 261 /* take care of subpage reads */
262 if (len % 4) { 262 if (len % 4) {
263 if (info->nand.options & NAND_BUSWIDTH_16) 263 if (info->nand.options & NAND_BUSWIDTH_16)
264 omap_read_buf16(mtd, buf, len % 4); 264 omap_read_buf16(mtd, buf, len % 4);
265 else 265 else
266 omap_read_buf8(mtd, buf, len % 4); 266 omap_read_buf8(mtd, buf, len % 4);
267 p = (u32 *) (buf + len % 4); 267 p = (u32 *) (buf + len % 4);
268 len -= len % 4; 268 len -= len % 4;
269 } 269 }
270 270
271 /* configure and start prefetch transfer */ 271 /* configure and start prefetch transfer */
272 ret = gpmc_prefetch_enable(info->gpmc_cs, 272 ret = gpmc_prefetch_enable(info->gpmc_cs,
273 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0); 273 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
274 if (ret) { 274 if (ret) {
275 /* PFPW engine is busy, use cpu copy method */ 275 /* PFPW engine is busy, use cpu copy method */
276 if (info->nand.options & NAND_BUSWIDTH_16) 276 if (info->nand.options & NAND_BUSWIDTH_16)
277 omap_read_buf16(mtd, (u_char *)p, len); 277 omap_read_buf16(mtd, (u_char *)p, len);
278 else 278 else
279 omap_read_buf8(mtd, (u_char *)p, len); 279 omap_read_buf8(mtd, (u_char *)p, len);
280 } else { 280 } else {
281 do { 281 do {
282 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 282 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
283 r_count = r_count >> 2; 283 r_count = r_count >> 2;
284 ioread32_rep(info->nand.IO_ADDR_R, p, r_count); 284 ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
285 p += r_count; 285 p += r_count;
286 len -= r_count << 2; 286 len -= r_count << 2;
287 } while (len); 287 } while (len);
288 /* disable and stop the PFPW engine */ 288 /* disable and stop the PFPW engine */
289 gpmc_prefetch_reset(info->gpmc_cs); 289 gpmc_prefetch_reset(info->gpmc_cs);
290 } 290 }
291 } 291 }
292 292
293 /** 293 /**
294 * omap_write_buf_pref - write buffer to NAND controller 294 * omap_write_buf_pref - write buffer to NAND controller
295 * @mtd: MTD device structure 295 * @mtd: MTD device structure
296 * @buf: data buffer 296 * @buf: data buffer
297 * @len: number of bytes to write 297 * @len: number of bytes to write
298 */ 298 */
299 static void omap_write_buf_pref(struct mtd_info *mtd, 299 static void omap_write_buf_pref(struct mtd_info *mtd,
300 const u_char *buf, int len) 300 const u_char *buf, int len)
301 { 301 {
302 struct omap_nand_info *info = container_of(mtd, 302 struct omap_nand_info *info = container_of(mtd,
303 struct omap_nand_info, mtd); 303 struct omap_nand_info, mtd);
304 uint32_t w_count = 0; 304 uint32_t w_count = 0;
305 int i = 0, ret = 0; 305 int i = 0, ret = 0;
306 u16 *p = (u16 *)buf; 306 u16 *p = (u16 *)buf;
307 unsigned long tim, limit; 307 unsigned long tim, limit;
308 308
309 /* take care of subpage writes */ 309 /* take care of subpage writes */
310 if (len % 2 != 0) { 310 if (len % 2 != 0) {
311 writeb(*buf, info->nand.IO_ADDR_W); 311 writeb(*buf, info->nand.IO_ADDR_W);
312 p = (u16 *)(buf + 1); 312 p = (u16 *)(buf + 1);
313 len--; 313 len--;
314 } 314 }
315 315
316 /* configure and start prefetch transfer */ 316 /* configure and start prefetch transfer */
317 ret = gpmc_prefetch_enable(info->gpmc_cs, 317 ret = gpmc_prefetch_enable(info->gpmc_cs,
318 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1); 318 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
319 if (ret) { 319 if (ret) {
320 /* PFPW engine is busy, use cpu copy method */ 320 /* PFPW engine is busy, use cpu copy method */
321 if (info->nand.options & NAND_BUSWIDTH_16) 321 if (info->nand.options & NAND_BUSWIDTH_16)
322 omap_write_buf16(mtd, (u_char *)p, len); 322 omap_write_buf16(mtd, (u_char *)p, len);
323 else 323 else
324 omap_write_buf8(mtd, (u_char *)p, len); 324 omap_write_buf8(mtd, (u_char *)p, len);
325 } else { 325 } else {
326 while (len) { 326 while (len) {
327 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 327 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
328 w_count = w_count >> 1; 328 w_count = w_count >> 1;
329 for (i = 0; (i < w_count) && len; i++, len -= 2) 329 for (i = 0; (i < w_count) && len; i++, len -= 2)
330 iowrite16(*p++, info->nand.IO_ADDR_W); 330 iowrite16(*p++, info->nand.IO_ADDR_W);
331 } 331 }
332 /* wait for data to flushed-out before reset the prefetch */ 332 /* wait for data to flushed-out before reset the prefetch */
333 tim = 0; 333 tim = 0;
334 limit = (loops_per_jiffy * 334 limit = (loops_per_jiffy *
335 msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); 335 msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
336 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) 336 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
337 cpu_relax(); 337 cpu_relax();
338 338
339 /* disable and stop the PFPW engine */ 339 /* disable and stop the PFPW engine */
340 gpmc_prefetch_reset(info->gpmc_cs); 340 gpmc_prefetch_reset(info->gpmc_cs);
341 } 341 }
342 } 342 }
343 343
344 /* 344 /*
345 * omap_nand_dma_cb: callback on the completion of dma transfer 345 * omap_nand_dma_cb: callback on the completion of dma transfer
346 * @lch: logical channel 346 * @lch: logical channel
347 * @ch_satuts: channel status 347 * @ch_satuts: channel status
348 * @data: pointer to completion data structure 348 * @data: pointer to completion data structure
349 */ 349 */
350 static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) 350 static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
351 { 351 {
352 complete((struct completion *) data); 352 complete((struct completion *) data);
353 } 353 }
354 354
355 /* 355 /*
356 * omap_nand_dma_transfer: configer and start dma transfer 356 * omap_nand_dma_transfer: configer and start dma transfer
357 * @mtd: MTD device structure 357 * @mtd: MTD device structure
358 * @addr: virtual address in RAM of source/destination 358 * @addr: virtual address in RAM of source/destination
359 * @len: number of data bytes to be transferred 359 * @len: number of data bytes to be transferred
360 * @is_write: flag for read/write operation 360 * @is_write: flag for read/write operation
361 */ 361 */
362 static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, 362 static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
363 unsigned int len, int is_write) 363 unsigned int len, int is_write)
364 { 364 {
365 struct omap_nand_info *info = container_of(mtd, 365 struct omap_nand_info *info = container_of(mtd,
366 struct omap_nand_info, mtd); 366 struct omap_nand_info, mtd);
367 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : 367 enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
368 DMA_FROM_DEVICE; 368 DMA_FROM_DEVICE;
369 dma_addr_t dma_addr; 369 dma_addr_t dma_addr;
370 int ret; 370 int ret;
371 unsigned long tim, limit; 371 unsigned long tim, limit;
372 372
373 /* The fifo depth is 64 bytes max. 373 /* The fifo depth is 64 bytes max.
374 * But configure the FIFO-threahold to 32 to get a sync at each frame 374 * But configure the FIFO-threahold to 32 to get a sync at each frame
375 * and frame length is 32 bytes. 375 * and frame length is 32 bytes.
376 */ 376 */
377 int buf_len = len >> 6; 377 int buf_len = len >> 6;
378 378
379 if (addr >= high_memory) { 379 if (addr >= high_memory) {
380 struct page *p1; 380 struct page *p1;
381 381
382 if (((size_t)addr & PAGE_MASK) != 382 if (((size_t)addr & PAGE_MASK) !=
383 ((size_t)(addr + len - 1) & PAGE_MASK)) 383 ((size_t)(addr + len - 1) & PAGE_MASK))
384 goto out_copy; 384 goto out_copy;
385 p1 = vmalloc_to_page(addr); 385 p1 = vmalloc_to_page(addr);
386 if (!p1) 386 if (!p1)
387 goto out_copy; 387 goto out_copy;
388 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); 388 addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
389 } 389 }
390 390
391 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir); 391 dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
392 if (dma_mapping_error(&info->pdev->dev, dma_addr)) { 392 if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
393 dev_err(&info->pdev->dev, 393 dev_err(&info->pdev->dev,
394 "Couldn't DMA map a %d byte buffer\n", len); 394 "Couldn't DMA map a %d byte buffer\n", len);
395 goto out_copy; 395 goto out_copy;
396 } 396 }
397 397
398 if (is_write) { 398 if (is_write) {
399 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, 399 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
400 info->phys_base, 0, 0); 400 info->phys_base, 0, 0);
401 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, 401 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
402 dma_addr, 0, 0); 402 dma_addr, 0, 0);
403 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, 403 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
404 0x10, buf_len, OMAP_DMA_SYNC_FRAME, 404 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
405 OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC); 405 OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
406 } else { 406 } else {
407 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, 407 omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
408 info->phys_base, 0, 0); 408 info->phys_base, 0, 0);
409 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, 409 omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
410 dma_addr, 0, 0); 410 dma_addr, 0, 0);
411 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, 411 omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
412 0x10, buf_len, OMAP_DMA_SYNC_FRAME, 412 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
413 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); 413 OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
414 } 414 }
415 /* configure and start prefetch transfer */ 415 /* configure and start prefetch transfer */
416 ret = gpmc_prefetch_enable(info->gpmc_cs, 416 ret = gpmc_prefetch_enable(info->gpmc_cs,
417 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); 417 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
418 if (ret) 418 if (ret)
419 /* PFPW engine is busy, use cpu copy method */ 419 /* PFPW engine is busy, use cpu copy method */
420 goto out_copy; 420 goto out_copy;
421 421
422 init_completion(&info->comp); 422 init_completion(&info->comp);
423 423
424 omap_start_dma(info->dma_ch); 424 omap_start_dma(info->dma_ch);
425 425
426 /* setup and start DMA using dma_addr */ 426 /* setup and start DMA using dma_addr */
427 wait_for_completion(&info->comp); 427 wait_for_completion(&info->comp);
428 tim = 0; 428 tim = 0;
429 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); 429 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
430 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) 430 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
431 cpu_relax(); 431 cpu_relax();
432 432
433 /* disable and stop the PFPW engine */ 433 /* disable and stop the PFPW engine */
434 gpmc_prefetch_reset(info->gpmc_cs); 434 gpmc_prefetch_reset(info->gpmc_cs);
435 435
436 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 436 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
437 return 0; 437 return 0;
438 438
439 out_copy: 439 out_copy:
440 if (info->nand.options & NAND_BUSWIDTH_16) 440 if (info->nand.options & NAND_BUSWIDTH_16)
441 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len) 441 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
442 : omap_write_buf16(mtd, (u_char *) addr, len); 442 : omap_write_buf16(mtd, (u_char *) addr, len);
443 else 443 else
444 is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len) 444 is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
445 : omap_write_buf8(mtd, (u_char *) addr, len); 445 : omap_write_buf8(mtd, (u_char *) addr, len);
446 return 0; 446 return 0;
447 } 447 }
448 448
449 /** 449 /**
450 * omap_read_buf_dma_pref - read data from NAND controller into buffer 450 * omap_read_buf_dma_pref - read data from NAND controller into buffer
451 * @mtd: MTD device structure 451 * @mtd: MTD device structure
452 * @buf: buffer to store date 452 * @buf: buffer to store date
453 * @len: number of bytes to read 453 * @len: number of bytes to read
454 */ 454 */
455 static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len) 455 static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
456 { 456 {
457 if (len <= mtd->oobsize) 457 if (len <= mtd->oobsize)
458 omap_read_buf_pref(mtd, buf, len); 458 omap_read_buf_pref(mtd, buf, len);
459 else 459 else
460 /* start transfer in DMA mode */ 460 /* start transfer in DMA mode */
461 omap_nand_dma_transfer(mtd, buf, len, 0x0); 461 omap_nand_dma_transfer(mtd, buf, len, 0x0);
462 } 462 }
463 463
464 /** 464 /**
465 * omap_write_buf_dma_pref - write buffer to NAND controller 465 * omap_write_buf_dma_pref - write buffer to NAND controller
466 * @mtd: MTD device structure 466 * @mtd: MTD device structure
467 * @buf: data buffer 467 * @buf: data buffer
468 * @len: number of bytes to write 468 * @len: number of bytes to write
469 */ 469 */
470 static void omap_write_buf_dma_pref(struct mtd_info *mtd, 470 static void omap_write_buf_dma_pref(struct mtd_info *mtd,
471 const u_char *buf, int len) 471 const u_char *buf, int len)
472 { 472 {
473 if (len <= mtd->oobsize) 473 if (len <= mtd->oobsize)
474 omap_write_buf_pref(mtd, buf, len); 474 omap_write_buf_pref(mtd, buf, len);
475 else 475 else
476 /* start transfer in DMA mode */ 476 /* start transfer in DMA mode */
477 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); 477 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
478 } 478 }
479 479
/*
 * omap_nand_irq - GMPC irq handler
 * @this_irq: gpmc irq number
 * @dev: omap_nand_info structure pointer is passed here
 *
 * Services GPMC prefetch-engine interrupts for both read and write
 * transfers started by omap_read_buf_irq_pref()/omap_write_buf_irq_pref().
 * On each FIFO event it moves as many whole 32-bit words as the FIFO
 * currently holds, and signals info->comp once the transfer-count event
 * fires so the sleeping caller can resume.
 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;
	u32 irq_stat;

	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
	bytes = bytes & 0xFFFC;	/* io in multiple of 4 bytes */
	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
		/*
		 * NOTE(review): bit 0x2 of the IRQ status appears to be the
		 * GPMC terminal-count (COUNT) event — confirm against the
		 * GPMC_IRQSTATUS layout in the OMAP TRM.
		 */
		if (irq_stat & 0x2)
			goto done;

		/* never push more than the bytes still owed to the chip */
		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		iowrite32_rep(info->nand.IO_ADDR_W,
				(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		/* read: drain whatever the FIFO holds before checking done */
		ioread32_rep(info->nand.IO_ADDR_R,
				(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;

		if (irq_stat & 0x2)
			goto done;
	}
	/* ack this FIFO event and wait for the next one */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;

done:
	/* transfer finished: wake the waiter in the *_irq_pref() caller */
	complete(&info->comp);
	/* disable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);

	/* clear status */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;
}
529 529
/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Uses the GPMC prefetch engine in IRQ mode for large transfers; the
 * actual FIFO draining happens in omap_nand_irq(), which completes
 * info->comp when the whole length has been read.  Falls back to plain
 * CPU copy when the prefetch engine is busy.
 */
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	/* short (OOB-sized) transfers are not worth IRQ setup overhead */
	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}
575 575
/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * IRQ-driven counterpart of omap_read_buf_irq_pref() for writes.  The
 * FIFO is filled from omap_nand_irq(); after the completion fires this
 * function additionally busy-waits (bounded by OMAP_NAND_TIMEOUT_MS)
 * for the prefetch engine's remaining count to drain before resetting it.
 */
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
		const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	unsigned long tim, limit;

	/* short (OOB-sized) transfers are not worth IRQ setup overhead */
	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for write to complete */
	wait_for_completion(&info->comp);
	/* wait for data to flushed-out before reset the prefetch */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
		cpu_relax();

	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}
629 629
630 /** 630 /**
631 * omap_verify_buf - Verify chip data against buffer 631 * omap_verify_buf - Verify chip data against buffer
632 * @mtd: MTD device structure 632 * @mtd: MTD device structure
633 * @buf: buffer containing the data to compare 633 * @buf: buffer containing the data to compare
634 * @len: number of bytes to compare 634 * @len: number of bytes to compare
635 */ 635 */
636 static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len) 636 static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
637 { 637 {
638 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 638 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
639 mtd); 639 mtd);
640 u16 *p = (u16 *) buf; 640 u16 *p = (u16 *) buf;
641 641
642 len >>= 1; 642 len >>= 1;
643 while (len--) { 643 while (len--) {
644 if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R))) 644 if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
645 return -EFAULT; 645 return -EFAULT;
646 } 646 }
647 647
648 return 0; 648 return 0;
649 } 649 }
650 650
/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core
 *
 * Converts the 3 hardware ECC bytes in @ecc_buf, in place, into the
 * "true" (inverted parity) layout expected by omap_compare_ecc().
 * The P*o/P*e macros extract the odd/even parity bits from @tmp;
 * their bit positions are defined elsewhere in this file.
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	/* pack the 3 ECC bytes into one word so the parity macros apply */
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}
670 670
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 * If there is no error, %0 is returned. If there is an error but it
 * was corrected, %1 is returned. Otherwise, %-1 is returned.
 *
 * NOTE: both @ecc_data1 and @ecc_data2 are destroyed in the process
 * (gen_true_ecc() and the bit-extraction loops mutate them in place).
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	/* remember whether the stored ECC was all-0xFF (erased page) */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	/* explode each ECC byte into its 8 bits, LSB first */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	/* XOR stored vs. computed parity into the 24-entry syndrome */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	/* Hamming property: a single-bit data error sets exactly 12 bits */
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 *  ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error: odd syndrome bits encode the
		 * failing byte offset and bit position */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 1;
	default:
		if (isEccFF) {
			/* erased page: stored ECC was 0xFF and the
			 * computed ECC is clean, so nothing to fix */
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}
798 798
/**
 * omap_read_page_bch - BCH ecc based page read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @page: page number to read
 *
 * For BCH syndrome calculation and error correction using ELM module.
 * Reads each ECC-sized chunk of the page together with its ECC bytes
 * from the OOB area, computes the hardware syndrome per chunk, then
 * runs chip->ecc.correct() over every chunk and accounts the results
 * in mtd->ecc_stats.
 */
static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	uint8_t *oob = chip->oob_poi;
	uint32_t data_pos;
	uint32_t oob_pos;

	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						mtd);
	data_pos = 0;
	/* oob area start */
	oob_pos = (eccsize * eccsteps) + chip->ecc.layout->eccpos[0];

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize,
				oob += eccbytes) {
		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		/* read data */
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_pos, page);
		chip->read_buf(mtd, p, eccsize);

		/* read respective ecc from oob area */
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, page);

		if (info->ecc_opt == OMAP_ECC_BCH8_CODE_HW) {
			/* BCH8: 13 ECC bytes per step, skip one pad byte */
			chip->read_buf(mtd, oob, 13);
			oob++;
		} else
			chip->read_buf(mtd, oob, eccbytes);
		/* read syndrome */
		chip->ecc.calculate(mtd, p, &ecc_calc[i]);

		data_pos += eccsize;
		oob_pos += eccbytes;
	}

	/* gather stored ECC bytes from the OOB layout positions */
	for (i = 0; i < chip->ecc.total; i++)
		ecc_code[i] = chip->oob_poi[eccpos[i]];

	eccsteps = chip->ecc.steps;
	p = buf;

	/* correct every chunk; stat < 0 means uncorrectable */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);

		if (stat < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += stat;
	}
	return 0;
}
872 869
/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ecc read from nand spare area with ECC registers values
 * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
 * detection and correction. If there are no errors, %0 is returned. If
 * there were errors and all of the errors were corrected, the number of
 * corrected errors is returned. If uncorrectable errors exist, %-1 is
 * returned.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;
	int stat = 0;
	int j, eccsize, eccflag, count;
	unsigned int err_loc[8];

	/* Ex NAND_ECC_HW12_2048 */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	switch (info->ecc_opt) {
	case OMAP_ECC_HAMMING_CODE_HW:
	case OMAP_ECC_HAMMING_CODE_HW_ROMCODE:
		/* 3 ECC bytes protect each 512-byte sub-block */
		for (i = 0; i < blockCnt; i++) {
			if (memcmp(read_ecc, calc_ecc, 3) != 0) {
				ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
				if (ret < 0)
					return ret;

				/* keep track of number of corrected errors */
				stat += ret;
			}
			read_ecc += 3;
			calc_ecc += 3;
			dat += 512;
		}
		break;

	case OMAP_ECC_BCH4_CODE_HW:
		eccsize = 7;
		/* refresh calc_ecc from the hardware ECC engine */
		gpmc_calculate_ecc(info->ecc_opt, info->gpmc_cs, dat, calc_ecc);
		for (i = 0; i < blockCnt; i++) {
			/* check if any ecc error */
			eccflag = 0;
			for (j = 0; (j < eccsize) && (eccflag == 0); j++)
				if (calc_ecc[j] != 0)
					eccflag = 1;

			/* all-0xFF stored ECC means erased: skip decode */
			if (eccflag == 1) {
				eccflag = 0;
				for (j = 0; (j < eccsize) &&
						(eccflag == 0); j++)
					if (read_ecc[j] != 0xFF)
						eccflag = 1;
			}

			count = 0;
			if (eccflag == 1)
				count = decode_bch(0, calc_ecc, err_loc);

			for (j = 0; j < count; j++) {
				/* flip only error bits inside the data area */
				if (err_loc[j] < 4096)
					dat[err_loc[j] >> 3] ^=
							1 << (err_loc[j] & 7);
				/* else, not interested to correct ecc */
			}

			stat += count;
			calc_ecc = calc_ecc + eccsize;
			read_ecc = read_ecc + eccsize;
			dat += 512;
		}
		break;
	case OMAP_ECC_BCH8_CODE_HW:
		eccsize = BCH8_ECC_OOB_BYTES;

		for (i = 0; i < blockCnt; i++) {
			eccflag = 0;
			/* check if area is flashed */
			for (j = 0; (j < eccsize) && (eccflag == 0); j++)
				if (read_ecc[j] != 0xFF)
					eccflag = 1;

			if (eccflag == 1) {
				eccflag = 0;
				/* check if any ecc error */
				for (j = 0; (j < eccsize) && (eccflag == 0);
						j++)
					if (calc_ecc[j] != 0)
						eccflag = 1;
			}

			count = 0;
			/* decode error locations via the ELM module */
			if (eccflag == 1)
				count = elm_decode_bch_error(0, calc_ecc,
						err_loc);

			for (j = 0; j < count; j++) {
				u32 bit_pos, byte_pos;

				/* ELM reports bit offsets from the end of
				 * the codeword, hence the reversed byte_pos */
				bit_pos = err_loc[j] % 8;
				byte_pos = (BCH8_ECC_MAX - err_loc[j] - 1) / 8;
				if (err_loc[j] < BCH8_ECC_MAX)
					dat[byte_pos] ^=
						1 << bit_pos;
				/* else, not interested to correct ecc */
			}

			stat += count;
			/* 13 ECC bytes + 1 pad per step on flash/registers */
			calc_ecc = calc_ecc + 14;
			read_ecc = read_ecc + 14;
			dat += BCH8_ECC_BYTES;
		}
		break;
	}
	return stat;
}
1001 998
1002 /** 999 /**
1003 * omap_calcuate_ecc - Generate non-inverted ECC bytes. 1000 * omap_calcuate_ecc - Generate non-inverted ECC bytes.
1004 * @mtd: MTD device structure 1001 * @mtd: MTD device structure
1005 * @dat: The pointer to data on which ecc is computed 1002 * @dat: The pointer to data on which ecc is computed
1006 * @ecc_code: The ecc_code buffer 1003 * @ecc_code: The ecc_code buffer
1007 * 1004 *
1008 * Using noninverted ECC can be considered ugly since writing a blank 1005 * Using noninverted ECC can be considered ugly since writing a blank
1009 * page ie. padding will clear the ECC bytes. This is no problem as long 1006 * page ie. padding will clear the ECC bytes. This is no problem as long
1010 * nobody is trying to write data on the seemingly unused page. Reading 1007 * nobody is trying to write data on the seemingly unused page. Reading
1011 * an erased page will produce an ECC mismatch between generated and read 1008 * an erased page will produce an ECC mismatch between generated and read
1012 * ECC bytes that has to be dealt with separately. 1009 * ECC bytes that has to be dealt with separately.
1013 */ 1010 */
1014 static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat, 1011 static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
1015 u_char *ecc_code) 1012 u_char *ecc_code)
1016 { 1013 {
1017 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1014 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1018 mtd); 1015 mtd);
1019 return gpmc_calculate_ecc(info->ecc_opt, info->gpmc_cs, dat, ecc_code); 1016 return gpmc_calculate_ecc(info->ecc_opt, info->gpmc_cs, dat, ecc_code);
1020 } 1017 }
1021 1018
1022 /** 1019 /**
1023 * omap_enable_hwecc - This function enables the hardware ecc functionality 1020 * omap_enable_hwecc - This function enables the hardware ecc functionality
1024 * @mtd: MTD device structure 1021 * @mtd: MTD device structure
1025 * @mode: Read/Write mode 1022 * @mode: Read/Write mode
1026 */ 1023 */
1027 static void omap_enable_hwecc(struct mtd_info *mtd, int mode) 1024 static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
1028 { 1025 {
1029 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1026 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1030 mtd); 1027 mtd);
1031 struct nand_chip *chip = mtd->priv; 1028 struct nand_chip *chip = mtd->priv;
1032 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0; 1029 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
1033 1030
1034 gpmc_enable_hwecc(info->ecc_opt, info->gpmc_cs, mode, 1031 gpmc_enable_hwecc(info->ecc_opt, info->gpmc_cs, mode,
1035 dev_width, info->nand.ecc.size); 1032 dev_width, info->nand.ecc.size);
1036 } 1033 }
1037 1034
/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * Wait function is called during Program and erase operations and
 * the way it is called from MTD layer, we should wait till the NAND
 * chip is ready after the programming/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs
 *
 * Returns the chip's raw status byte; NAND_STATUS_FAIL if the device
 * never returned a readable status within the timeout.
 * NOTE(review): @chip is unused — the state is taken from mtd->priv
 * instead; on timeout the last status read (which may lack
 * NAND_STATUS_READY) is returned as-is.
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	/* budget: 400ms for erase, 20ms for program (see header comment) */
	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	/* issue READ STATUS, then poll the data register for RDY */
	gpmc_nand_write(info->gpmc_cs,
			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
	while (time_before(jiffies, timeo)) {
		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}
	return status;
}
1073 1070
1074 /** 1071 /**
1075 * omap_dev_ready - calls the platform specific dev_ready function 1072 * omap_dev_ready - calls the platform specific dev_ready function
1076 * @mtd: MTD device structure 1073 * @mtd: MTD device structure
1077 */ 1074 */
1078 static int omap_dev_ready(struct mtd_info *mtd) 1075 static int omap_dev_ready(struct mtd_info *mtd)
1079 { 1076 {
1080 unsigned int val = 0; 1077 unsigned int val = 0;
1081 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1078 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1082 mtd); 1079 mtd);
1083 1080
1084 val = gpmc_read_status(GPMC_GET_IRQ_STATUS); 1081 val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
1085 if ((val & 0x100) == 0x100) { 1082 if ((val & 0x100) == 0x100) {
1086 /* Clear IRQ Interrupt */ 1083 /* Clear IRQ Interrupt */
1087 val |= 0x100; 1084 val |= 0x100;
1088 val &= ~(0x0); 1085 val &= ~(0x0);
1089 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val); 1086 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
1090 } else { 1087 } else {
1091 unsigned int cnt = 0; 1088 unsigned int cnt = 0;
1092 while (cnt++ < 0x1FF) { 1089 while (cnt++ < 0x1FF) {
1093 if ((val & 0x100) == 0x100) 1090 if ((val & 0x100) == 0x100)
1094 return 0; 1091 return 0;
1095 val = gpmc_read_status(GPMC_GET_IRQ_STATUS); 1092 val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
1096 } 1093 }
1097 } 1094 }
1098 1095
1099 return 1; 1096 return 1;
1100 } 1097 }
1101 1098
/**
 * omap_nand_probe - platform driver probe for OMAP GPMC NAND
 * @pdev: platform device carrying omap_nand_platform_data
 *
 * Allocates the driver state, claims and maps the GPMC chip-select
 * window, wires up the transfer-mode specific read/write buffer hooks,
 * selects the ECC scheme and OOB layout, scans the chip and registers
 * the MTD device (with parsed, platform, or whole-device partitions).
 *
 * Return: 0 on success, negative errno on failure.
 *
 * NOTE(review): error paths taken after ioremap() succeeds jump to
 * out_release_mem_region without calling iounmap(), leaking the mapping.
 * Likewise a failure after the DMA channel or GPMC IRQ is acquired does
 * not release them.  TODO: add dedicated unwind labels — confirm no
 * other exit path already covers these.
 */
static int __devinit omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info *info;
	struct omap_nand_platform_data *pdata;
	int err;
	int i, offset;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->pdev = pdev;

	info->gpmc_cs = pdata->cs;
	info->phys_base = pdata->phys_base;

	info->mtd.priv = &info->nand;
	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.owner = THIS_MODULE;
	info->ecc_opt = pdata->ecc_opt;

	/* pdata->devsize carries NAND_BUSWIDTH_16 when the part is x16. */
	info->nand.options = pdata->devsize;
	info->nand.options |= NAND_SKIP_BBTSCAN;

	/* NAND write protect off */
	gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);

	if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
				pdev->dev.driver->name)) {
		err = -EBUSY;
		goto out_free_info;
	}

	info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
	if (!info->nand.IO_ADDR_R) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}

	info->nand.controller = &info->controller;

	/* Reads and writes go through the same GPMC data window. */
	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
	info->nand.cmd_ctrl = omap_hwcontrol;

	/*
	 * If RDY/BSY line is connected to OMAP then use the omap ready
	 * function and the generic nand_wait function which reads the status
	 * register after monitoring the RDY/BSY line. Otherwise use a standard
	 * chip delay which is slightly more than tR (AC Timing) of the NAND
	 * device and read status register until you get a failure or success
	 */
	if (pdata->dev_ready) {
		info->nand.dev_ready = omap_dev_ready;
		info->nand.chip_delay = 0;
	} else {
		info->nand.waitfunc = omap_wait;
		info->nand.chip_delay = 50;
	}

	/* Select the buffer transfer hooks for the configured xfer mode. */
	switch (pdata->xfer_type) {
	case NAND_OMAP_PREFETCH_POLLED:
		info->nand.read_buf = omap_read_buf_pref;
		info->nand.write_buf = omap_write_buf_pref;
		break;

	case NAND_OMAP_POLLED:
		/* Plain CPU polling, width-dependent accessors. */
		if (info->nand.options & NAND_BUSWIDTH_16) {
			info->nand.read_buf = omap_read_buf16;
			info->nand.write_buf = omap_write_buf16;
		} else {
			info->nand.read_buf = omap_read_buf8;
			info->nand.write_buf = omap_write_buf8;
		}
		break;

	case NAND_OMAP_PREFETCH_DMA:
		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
				omap_nand_dma_cb, &info->comp, &info->dma_ch);
		if (err < 0) {
			/* Mark "no channel" so remove() skips omap_free_dma */
			info->dma_ch = -1;
			dev_err(&pdev->dev, "DMA request failed!\n");
			goto out_release_mem_region;
		} else {
			omap_set_dma_dest_burst_mode(info->dma_ch,
					OMAP_DMA_DATA_BURST_16);
			omap_set_dma_src_burst_mode(info->dma_ch,
					OMAP_DMA_DATA_BURST_16);

			info->nand.read_buf = omap_read_buf_dma_pref;
			info->nand.write_buf = omap_write_buf_dma_pref;
		}
		break;

	case NAND_OMAP_PREFETCH_IRQ:
		err = request_irq(pdata->gpmc_irq,
				omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
		if (err) {
			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
					pdata->gpmc_irq, err);
			goto out_release_mem_region;
		} else {
			info->gpmc_irq = pdata->gpmc_irq;
			info->nand.read_buf = omap_read_buf_irq_pref;
			info->nand.write_buf = omap_write_buf_irq_pref;
		}
		break;

	default:
		dev_err(&pdev->dev,
			"xfer_type(%d) not supported!\n", pdata->xfer_type);
		err = -EINVAL;
		goto out_release_mem_region;
	}

	info->nand.verify_buf = omap_verify_buf;

	/* select the ecc type */
	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
		info->nand.ecc.mode = NAND_ECC_SOFT;
	else {
		if (pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) {
			/* BCH4: 7 ECC bytes per 512B, computed 4 steps at once */
			info->nand.ecc.bytes = 4*7;
			info->nand.ecc.size = 4*512;
		} else if (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW) {
			info->nand.ecc.bytes = 14;
			info->nand.ecc.size = 512;
			info->nand.ecc.read_page = omap_read_page_bch;
		} else {
			/* HW Hamming: 3 ECC bytes per 512B step */
			info->nand.ecc.bytes = 3;
			info->nand.ecc.size = 512;
		}
		info->nand.ecc.calculate = omap_calculate_ecc;
		info->nand.ecc.hwctl = omap_enable_hwecc;
		info->nand.ecc.correct = omap_correct_data;
		info->nand.ecc.mode = NAND_ECC_HW;
	}

	/* DIP switches on some boards change between 8 and 16 bit
	 * bus widths for flash. Try the other width if the first try fails.
	 */
	if (nand_scan_ident(&info->mtd, 1, NULL)) {
		info->nand.options ^= NAND_BUSWIDTH_16;
		if (nand_scan_ident(&info->mtd, 1, NULL)) {
			err = -ENXIO;
			goto out_release_mem_region;
		}
	}

	/* select ecc layout */
	if (info->nand.ecc.mode != NAND_ECC_SOFT) {

		if (info->nand.options & NAND_BUSWIDTH_16)
			offset = JFFS2_CLEAN_MARKER_OFFSET;
		else {
			offset = JFFS2_CLEAN_MARKER_OFFSET;
			info->nand.badblock_pattern = &bb_descrip_flashbased;
		}

		/* For 64-byte OOB, scale ECC bytes to a 2048B page worth
		 * of ECC steps; otherwise one step's worth. */
		if (info->mtd.oobsize == 64)
			omap_oobinfo.eccbytes = info->nand.ecc.bytes *
							2048/info->nand.ecc.size;
		else
			omap_oobinfo.eccbytes = info->nand.ecc.bytes;

		if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
			/* ROM-code layout: ECC first, free area after it */
			omap_oobinfo.oobfree->offset =
						offset + omap_oobinfo.eccbytes;
			omap_oobinfo.oobfree->length = info->mtd.oobsize -
					(offset + omap_oobinfo.eccbytes);
		} else if (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW) {
			offset = BCH_ECC_POS; /* Synchronize with U-boot */
			omap_oobinfo.oobfree->offset =
						BCH_JFFS2_CLEAN_MARKER_OFFSET;
			omap_oobinfo.oobfree->length = info->mtd.oobsize -
						offset - omap_oobinfo.eccbytes;
		} else {
			omap_oobinfo.oobfree->offset = offset;
			omap_oobinfo.oobfree->length = info->mtd.oobsize -
						offset - omap_oobinfo.eccbytes;
			/*
			offset is calculated considering the following :
			1) 12 bytes ECC for 512 byte access and 24 bytes ECC for
			256 byte access in OOB_64 can be supported
			2)Ecc bytes lie to the end of OOB area.
			3)Ecc layout must match with u-boot's ECC layout.
			*/
			offset = info->mtd.oobsize - MAX_HWECC_BYTES_OOB_64;
		}

		for (i = 0; i < omap_oobinfo.eccbytes; i++)
			omap_oobinfo.eccpos[i] = i+offset;

		info->nand.ecc.layout = &omap_oobinfo;
	}

	/* second phase scan */
	if (nand_scan_tail(&info->mtd)) {
		err = -ENXIO;
		goto out_release_mem_region;
	}

	/* Prefer command-line/parsed partitions, then platform-provided
	 * partitions, then register the whole device unpartitioned. */
	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
	if (err > 0)
		mtd_device_register(&info->mtd, info->parts, err);
	else if (pdata->parts)
		mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
	else
		mtd_device_register(&info->mtd, NULL, 0);

	/* drvdata now points at the mtd_info (remove() expects this). */
	platform_set_drvdata(pdev, &info->mtd);

	return 0;

out_release_mem_region:
	release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
	kfree(info);

	return err;
}
1332 1329
1333 static int omap_nand_remove(struct platform_device *pdev) 1330 static int omap_nand_remove(struct platform_device *pdev)
1334 { 1331 {
1335 struct mtd_info *mtd = platform_get_drvdata(pdev); 1332 struct mtd_info *mtd = platform_get_drvdata(pdev);
1336 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1333 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1337 mtd); 1334 mtd);
1338 1335
1339 platform_set_drvdata(pdev, NULL); 1336 platform_set_drvdata(pdev, NULL);
1340 if (info->dma_ch != -1) 1337 if (info->dma_ch != -1)
1341 omap_free_dma(info->dma_ch); 1338 omap_free_dma(info->dma_ch);
1342 1339
1343 if (info->gpmc_irq) 1340 if (info->gpmc_irq)
1344 free_irq(info->gpmc_irq, info); 1341 free_irq(info->gpmc_irq, info);
1345 1342
1346 /* Release NAND device, its internal structures and partitions */ 1343 /* Release NAND device, its internal structures and partitions */
1347 nand_release(&info->mtd); 1344 nand_release(&info->mtd);
1348 iounmap(info->nand.IO_ADDR_R); 1345 iounmap(info->nand.IO_ADDR_R);
1349 release_mem_region(info->phys_base, NAND_IO_SIZE); 1346 release_mem_region(info->phys_base, NAND_IO_SIZE);
1350 kfree(&info->mtd); 1347 kfree(&info->mtd);
1351 return 0; 1348 return 0;
1352 } 1349 }
1353 1350
/* Platform driver glue: matched by name against the board's
 * gpmc-nand platform device. */
static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
1362 1359
/* Module init: announce ourselves and register the platform driver. */
static int __init omap_nand_init(void)
{
	pr_info("%s driver initializing\n", DRIVER_NAME);

	return platform_driver_register(&omap_nand_driver);
}
1369 1366
/* Module exit: unregister the platform driver (triggers remove()). */
static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}
1374 1371
module_init(omap_nand_init);
module_exit(omap_nand_exit);

/* Allow auto-loading when the platform device "DRIVER_NAME" appears. */
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
1381 1378