Commit 070c32223ae8a724a190ea769104ea41567e3673

Authored by Harald Nordgard-Hansen
Committed by Artem Bityutskiy
1 parent ff3206b245

mtd: fix recovery after failed write-buffer operation in cfi_cmdset_0002.c

While working on a problem with some flash chips that lock up during
write-buffer operations, I found what appears to be a bug in the Linux
handling of chips using cfi_cmdset_0002.c.

The datasheets I have found for a number of these chips all specify that
when aborting a write-buffer command, it is not enough to use the
standard reset.  Rather, a "write-to-buffer-reset" command is needed.
This command is quite similar for all chips; the main variance seems to
be whether the final 0xF0 can go to any address or must go to addr_unlock1.

The bug is in the recovery handling when the operation times out at the
end of do_write_buffer, where the normal reset command is not sufficient.

Without this change, if the write-buffer command fails then any
following operations on the flash also fail.
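
As an illustration of what the datasheets call for, the sequence below
sketches the write-to-buffer-reset using the driver's existing
cfi_send_gen_cmd() helper.  This is a sketch of the recovery idea, not
necessarily the exact hunk applied, and it assumes the do_write_buffer()
context (map, cfi and chip->start in scope); sending the final 0xF0 to
addr_unlock1 satisfies both chip variants:

        /* Write-to-buffer-reset: two unlock cycles, then 0xF0.  The
         * final 0xF0 is sent to addr_unlock1, which also covers chips
         * that accept it at any address. */
        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
                         cfi->device_type, NULL);
        cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
                         cfi->device_type, NULL);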

Signed-off-by: Harald Nordgard-Hansen <hhansen@pvv.org>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>

Showing 1 changed file with 14 additions and 2 deletions

drivers/mtd/chips/cfi_cmdset_0002.c
/*
 * Common Flash Interface support:
 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
                                  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
        .probe = NULL, /* Not usable directly */
        .destroy = cfi_amdstd_destroy,
        .name = "cfi_cmdset_0002",
        .module = THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
        const char* erase_suspend[3] = {
                "Not supported", "Read only", "Read/write"
        };
        const char* top_bottom[6] = {
                "No WP", "8x8KiB sectors at top & bottom, no WP",
                "Bottom boot", "Top boot",
                "Uniform, Bottom WP", "Uniform, Top WP"
        };

        printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
        printk(" Address sensitive unlock: %s\n",
               (extp->SiliconRevision & 1) ? "Not required" : "Required");

        if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
                printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
        else
                printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

        if (extp->BlkProt == 0)
                printk(" Block protection: Not supported\n");
        else
                printk(" Block protection: %d sectors per group\n", extp->BlkProt);


        printk(" Temporary block unprotect: %s\n",
               extp->TmpBlkUnprotect ? "Supported" : "Not supported");
        printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
        printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
        printk(" Burst mode: %s\n",
               extp->BurstMode ? "Supported" : "Not supported");
        if (extp->PageMode == 0)
                printk(" Page mode: Not supported\n");
        else
                printk(" Page mode: %d word page\n", extp->PageMode << 2);

        printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
               extp->VppMin >> 4, extp->VppMin & 0xf);
        printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
               extp->VppMax >> 4, extp->VppMax & 0xf);

        if (extp->TopBottom < ARRAY_SIZE(top_bottom))
                printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
        else
                printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        __u8 major = extp->MajorVersion;
        __u8 minor = extp->MinorVersion;

        if (((major << 8) | minor) < 0x3131) {
                /* CFI version 1.0 => don't trust bootloc */

                pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
                         map->name, cfi->mfr, cfi->id);

                /* AFAICS all 29LV400 with a bottom boot block have a device ID
                 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
                 * These were badly detected as they have the 0x80 bit set
                 * so treat them as a special case.
                 */
                if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

                        /* Macronix added CFI to their 2nd generation
                         * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
                         * Fujitsu, Spansion, EON, ESI and older Macronix)
                         * has CFI.
                         *
                         * Therefore also check the manufacturer.
                         * This reduces the risk of false detection due to
                         * the 8-bit device ID.
                         */
                        (cfi->mfr == CFI_MFR_MACRONIX)) {
                        pr_debug("%s: Macronix MX29LV400C with bottom boot block"
                                 " detected\n", map->name);
                        extp->TopBottom = 2;    /* bottom boot */
                } else
                if (cfi->id & 0x80) {
                        printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
                        extp->TopBottom = 3;    /* top boot */
                } else {
                        extp->TopBottom = 2;    /* bottom boot */
                }

                pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
                         " deduced %s from Device ID\n", map->name, major, minor,
                         extp->TopBottom == 2 ? "bottom" : "top");
        }
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                pr_debug("Using buffer write method\n" );
                mtd->_write = cfi_amdstd_write_buffers;
        }
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        if (atmel_pri.Features & 0x02)
                extp->EraseSuspend = 2;

        /* Some chips got it backwards... */
        if (cfi->id == AT49BV6416) {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 3;
                else
                        extp->TopBottom = 2;
        } else {
                if (atmel_pri.BottomBoot)
                        extp->TopBottom = 2;
                else
                        extp->TopBottom = 3;
        }

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
        /* Setup for chips with a secsi area */
        mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
        mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if ((cfi->cfiq->NumEraseRegions == 1) &&
            ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
                mtd->_erase = cfi_amdstd_erase_chip;
        }

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
        mtd->_lock = cfi_atmel_lock;
        mtd->_unlock = cfi_atmel_unlock;
        mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * These flashes report two separate eraseblock regions based on the
         * sector_erase-size and block_erase-size, although they both operate on the
         * same memory. This is not allowed according to CFI, so we just pick the
         * sector_erase-size.
         */
        cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x5555;
        cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_old_sst_eraseregion(mtd);

        cfi->addr_unlock1 = 0x555;
        cfi->addr_unlock2 = 0x2AA;

        cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        fixup_sst39vf_rev_b(mtd);

        /*
         * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
         * it should report a size of 8KBytes (0x0020*256).
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
        pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
                cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
                pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
        }
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
                cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
                pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
        }
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /*
         * S29NS512P flash uses more than 8bits to report number of sectors,
         * which is not permitted by CFI.
         */
        cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
        pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
        { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
        { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
        { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
        { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
        { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
        { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
        { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
        { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
        { 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
        { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
        { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
        { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
        { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
        { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
        { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
        { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
        { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
        { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
        { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
        { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
        { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor ids and the JEDEC vendor IDs appear
         * to be common. It is like the devices id's are as
         * well. This table is to pick all cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
        { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
        { 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                  struct cfi_pri_amdstd *extp)
{
        if (cfi->mfr == CFI_MFR_SAMSUNG) {
                if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
                    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
                        /*
                         * Samsung K8P2815UQB and K8D6x16UxM chips
                         * report major=0 / minor=0.
                         * K8D3x16UxC chips report major=3 / minor=3.
                         */
                        printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
                               " Extended Query version to 1.%c\n",
                               extp->MinorVersion);
                        extp->MajorVersion = '1';
                }
        }

        /*
         * SST 38VF640x chips report major=0xFF / minor=0xFF.
         */
        if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
                extp->MajorVersion = '1';
                extp->MinorVersion = '0';
        }
}

static int is_m29ew(struct cfi_private *cfi)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
            ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
             (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
                return 1;
        return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
                                          unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
        if (is_m29ew(cfi))
                map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive. As a result, it is recommended
 * that a patch be applied. Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur. The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
        /*
         * Resolving the Delay After Resume Issue see Micron TN-13-07
         * Worst case delay must be 500µs but 30-50µs should be ok as well
         */
        if (is_m29ew(cfi))
                cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase = cfi_amdstd_erase_varsize;
        mtd->_write = cfi_amdstd_write_words;
        mtd->_read = cfi_amdstd_read;
        mtd->_sync = cfi_amdstd_sync;
        mtd->_suspend = cfi_amdstd_suspend;
        mtd->_resume = cfi_amdstd_resume;
        mtd->flags = MTD_CAP_NORFLASH;
        mtd->name = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        pr_debug("MTD %s(): write buffer size %d\n", __func__,
                 mtd->writebufsize);

        mtd->_panic_write = cfi_amdstd_panic_write;
        mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

        if (cfi->cfi_mode==CFI_MODE_CFI){
                unsigned char bootloc;
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_amdstd *extp;

                extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
                if (extp) {
                        /*
                         * It's a real CFI chip, not one for which the probe
                         * routine faked a CFI structure.
                         */
                        cfi_fixup_major_minor(cfi, extp);

                        /*
                         * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
                         * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
                         * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
                         * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
                         * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
                         */
                        if (extp->MajorVersion != '1' ||
                            (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
                                printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
                                       "version %c.%c (%#02x/%#02x).\n",
                                       extp->MajorVersion, extp->MinorVersion,
                                       extp->MajorVersion, extp->MinorVersion);
                                kfree(extp);
                                kfree(mtd);
                                return NULL;
                        }

                        printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
                               extp->MajorVersion, extp->MinorVersion);

                        /* Install our own private info structure */
                        cfi->cmdset_priv = extp;

                        /* Apply cfi device specific fixups */
                        cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                        /* Tell the user about it in lots of lovely detail */
                        cfi_tell_features(extp);
#endif

                        bootloc = extp->TopBottom;
                        if ((bootloc < 2) || (bootloc > 5)) {
                                printk(KERN_WARNING "%s: CFI contains unrecognised boot "
                                       "bank location (%d). Assuming bottom.\n",
                                       map->name, bootloc);
                                bootloc = 2;
                        }

                        if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
                                printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

                                for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
                                        int j = (cfi->cfiq->NumEraseRegions-1)-i;
                                        __u32 swap;

                                        swap = cfi->cfiq->EraseRegionInfo[i];
                                        cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
                                        cfi->cfiq->EraseRegionInfo[j] = swap;
                                }
                        }
                        /* Set the default CFI lock/unlock addresses */
                        cfi->addr_unlock1 = 0x555;
                        cfi->addr_unlock2 = 0x2aa;
                }
                cfi_fixup(mtd, cfi_nopri_fixup_table);

                if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
                        kfree(mtd);
                        return NULL;
                }

        } /* CFI mode */
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_amdstd_chipdrv;

        return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
        unsigned long offset = 0;
        int i,j;

        printk(KERN_NOTICE "number of %s chips: %d\n",
               (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
        /* Select the correct geometry setup */
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                                    * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }
        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
        map_word d, t;

        d = map_read(map, addr);
        t = map_read(map, addr);

        return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held with the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
        map_word oldd, curd;

        oldd = map_read(map, addr);
        curd = map_read(map, addr);

        return map_word_equal(map, oldd, curd) &&
               map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long timeo;
        struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
                                return -EIO;
                        }
                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
                        goto sleep;

                /* We could check to see if we're trying to access the sector
                 * that is currently being erased. However, no user will try
                 * anything like that so we just wait for the timeout. */

                /* Erase suspend */
                /* It's harmless to issue the Erase-Suspend and Erase-Resume
                 * commands when the erase algorithm isn't in progress. */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        if (chip_ready(map, adr))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Should have suspended the erase by now.
                                 * Send an Erase-Resume command as either
                                 * there was an error (so leave the erase
                                 * routine to recover from it) or we are trying to
790 * use the erase-in-progress sector. */ 790 * use the erase-in-progress sector. */
791 put_chip(map, chip, adr); 791 put_chip(map, chip, adr);
792 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 792 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
793 return -EIO; 793 return -EIO;
794 } 794 }
795 795
796 mutex_unlock(&chip->mutex); 796 mutex_unlock(&chip->mutex);
797 cfi_udelay(1); 797 cfi_udelay(1);
798 mutex_lock(&chip->mutex); 798 mutex_lock(&chip->mutex);
799 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 799 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
800 So we can just loop here. */ 800 So we can just loop here. */
801 } 801 }
802 chip->state = FL_READY; 802 chip->state = FL_READY;
803 return 0; 803 return 0;
804 804
805 case FL_XIP_WHILE_ERASING: 805 case FL_XIP_WHILE_ERASING:
806 if (mode != FL_READY && mode != FL_POINT && 806 if (mode != FL_READY && mode != FL_POINT &&
807 (!cfip || !(cfip->EraseSuspend&2))) 807 (!cfip || !(cfip->EraseSuspend&2)))
808 goto sleep; 808 goto sleep;
809 chip->oldstate = chip->state; 809 chip->oldstate = chip->state;
810 chip->state = FL_READY; 810 chip->state = FL_READY;
811 return 0; 811 return 0;
812 812
813 case FL_SHUTDOWN: 813 case FL_SHUTDOWN:
814 /* The machine is rebooting */ 814 /* The machine is rebooting */
815 return -EIO; 815 return -EIO;
816 816
817 case FL_POINT: 817 case FL_POINT:
818 /* Only if there's no operation suspended... */ 818 /* Only if there's no operation suspended... */
819 if (mode == FL_READY && chip->oldstate == FL_READY) 819 if (mode == FL_READY && chip->oldstate == FL_READY)
820 return 0; 820 return 0;
821 821
822 default: 822 default:
823 sleep: 823 sleep:
824 set_current_state(TASK_UNINTERRUPTIBLE); 824 set_current_state(TASK_UNINTERRUPTIBLE);
825 add_wait_queue(&chip->wq, &wait); 825 add_wait_queue(&chip->wq, &wait);
826 mutex_unlock(&chip->mutex); 826 mutex_unlock(&chip->mutex);
827 schedule(); 827 schedule();
828 remove_wait_queue(&chip->wq, &wait); 828 remove_wait_queue(&chip->wq, &wait);
829 mutex_lock(&chip->mutex); 829 mutex_lock(&chip->mutex);
830 goto resettime; 830 goto resettime;
831 } 831 }
832 } 832 }
833 833
834 834
835 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) 835 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
836 { 836 {
837 struct cfi_private *cfi = map->fldrv_priv; 837 struct cfi_private *cfi = map->fldrv_priv;
838 838
839 switch(chip->oldstate) { 839 switch(chip->oldstate) {
840 case FL_ERASING: 840 case FL_ERASING:
841 cfi_fixup_m29ew_erase_suspend(map, 841 cfi_fixup_m29ew_erase_suspend(map,
842 chip->in_progress_block_addr); 842 chip->in_progress_block_addr);
843 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 843 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
844 cfi_fixup_m29ew_delay_after_resume(cfi); 844 cfi_fixup_m29ew_delay_after_resume(cfi);
845 chip->oldstate = FL_READY; 845 chip->oldstate = FL_READY;
846 chip->state = FL_ERASING; 846 chip->state = FL_ERASING;
847 break; 847 break;
848 848
849 case FL_XIP_WHILE_ERASING: 849 case FL_XIP_WHILE_ERASING:
850 chip->state = chip->oldstate; 850 chip->state = chip->oldstate;
851 chip->oldstate = FL_READY; 851 chip->oldstate = FL_READY;
852 break; 852 break;
853 853
854 case FL_READY: 854 case FL_READY:
855 case FL_STATUS: 855 case FL_STATUS:
856 break; 856 break;
857 default: 857 default:
858 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 858 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
859 } 859 }
860 wake_up(&chip->wq); 860 wake_up(&chip->wq);
861 } 861 }
862 862
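For orientation, every access path in this driver brackets the actual flash
operation between get_chip() and put_chip() while holding chip->mutex:
get_chip() may suspend an in-progress erase, and put_chip() resumes whatever
was suspended. A minimal sketch of the calling pattern (mirroring
do_read_onechip() further down; the function name and error handling are
illustrative only):

	static int example_flash_access(struct map_info *map, struct flchip *chip,
					unsigned long adr)
	{
		int ret;

		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, adr, FL_READY); /* may suspend an erase */
		if (ret) {
			mutex_unlock(&chip->mutex);
			return ret;
		}

		/* ... operate on the flash while the chip is FL_READY ... */

		put_chip(map, chip, adr); /* resume a suspended operation, if any */
		mutex_unlock(&chip->mutex);
		return 0;
	}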
863 #ifdef CONFIG_MTD_XIP 863 #ifdef CONFIG_MTD_XIP
864 864
865 /* 865 /*
866 * No interrupt whatsoever can be serviced while the flash isn't in array 866 * No interrupt whatsoever can be serviced while the flash isn't in array
867 * mode. This is ensured by the xip_disable() and xip_enable() functions 867 * mode. This is ensured by the xip_disable() and xip_enable() functions
868 * enclosing any code path where the flash is known not to be in array mode. 868 * enclosing any code path where the flash is known not to be in array mode.
869 * And within a XIP disabled code path, only functions marked with __xipram 869 * And within a XIP disabled code path, only functions marked with __xipram
870 * may be called and nothing else (it's a good thing to inspect generated 870 * may be called and nothing else (it's a good thing to inspect generated
871 * assembly to make sure inline functions were actually inlined and that gcc 871 * assembly to make sure inline functions were actually inlined and that gcc
872 * didn't emit calls to its own support functions). Configuring MTD CFI 872 * didn't emit calls to its own support functions). Configuring MTD CFI
873 * support for a single buswidth and a single interleave is also recommended. 873 * support for a single buswidth and a single interleave is also recommended.
874 */ 874 */
875 875
876 static void xip_disable(struct map_info *map, struct flchip *chip, 876 static void xip_disable(struct map_info *map, struct flchip *chip,
877 unsigned long adr) 877 unsigned long adr)
878 { 878 {
879 /* TODO: chips with no XIP use should ignore and return */ 879 /* TODO: chips with no XIP use should ignore and return */
880 (void) map_read(map, adr); /* ensure mmu mapping is up to date */ 880 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
881 local_irq_disable(); 881 local_irq_disable();
882 } 882 }
883 883
884 static void __xipram xip_enable(struct map_info *map, struct flchip *chip, 884 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
885 unsigned long adr) 885 unsigned long adr)
886 { 886 {
887 struct cfi_private *cfi = map->fldrv_priv; 887 struct cfi_private *cfi = map->fldrv_priv;
888 888
889 if (chip->state != FL_POINT && chip->state != FL_READY) { 889 if (chip->state != FL_POINT && chip->state != FL_READY) {
890 map_write(map, CMD(0xf0), adr); 890 map_write(map, CMD(0xf0), adr);
891 chip->state = FL_READY; 891 chip->state = FL_READY;
892 } 892 }
893 (void) map_read(map, adr); 893 (void) map_read(map, adr);
894 xip_iprefetch(); 894 xip_iprefetch();
895 local_irq_enable(); 895 local_irq_enable();
896 } 896 }
897 897
898 /* 898 /*
899 * When a delay is required for the flash operation to complete, the 899 * When a delay is required for the flash operation to complete, the
900 * xip_udelay() function polls for both the given timeout and pending 900 * xip_udelay() function polls for both the given timeout and pending
901 * (but still masked) hardware interrupts. Whenever there is an interrupt 901 * (but still masked) hardware interrupts. Whenever there is an interrupt
902 * pending then the flash erase operation is suspended, array mode restored 902 * pending then the flash erase operation is suspended, array mode restored
903 * and interrupts unmasked. Task scheduling might also happen at that 903 * and interrupts unmasked. Task scheduling might also happen at that
904 * point. The CPU eventually returns from the interrupt or the call to 904 * point. The CPU eventually returns from the interrupt or the call to
905 * schedule() and the suspended flash operation is resumed for the remainder 905 * schedule() and the suspended flash operation is resumed for the remainder
906 * of the delay period. 906 * of the delay period.
907 * 907 *
908 * Warning: this function _will_ fool interrupt latency tracing tools. 908 * Warning: this function _will_ fool interrupt latency tracing tools.
909 */ 909 */
910 910
911 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, 911 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
912 unsigned long adr, int usec) 912 unsigned long adr, int usec)
913 { 913 {
914 struct cfi_private *cfi = map->fldrv_priv; 914 struct cfi_private *cfi = map->fldrv_priv;
915 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 915 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
916 map_word status, OK = CMD(0x80); 916 map_word status, OK = CMD(0x80);
917 unsigned long suspended, start = xip_currtime(); 917 unsigned long suspended, start = xip_currtime();
918 flstate_t oldstate; 918 flstate_t oldstate;
919 919
920 do { 920 do {
921 cpu_relax(); 921 cpu_relax();
922 if (xip_irqpending() && extp && 922 if (xip_irqpending() && extp &&
923 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 923 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
924 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 924 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
925 /* 925 /*
926 * Let's suspend the erase operation when supported. 926 * Let's suspend the erase operation when supported.
927 * Note that we currently don't try to suspend 927 * Note that we currently don't try to suspend
928 * interleaved chips if there is already another 928 * interleaved chips if there is already another
929 * operation suspended (imagine what happens 929 * operation suspended (imagine what happens
930 * when one chip was already done with the current 930 * when one chip was already done with the current
931 * operation while another chip suspended it, then 931 * operation while another chip suspended it, then
932 * we resume the whole thing at once). Yes, it 932 * we resume the whole thing at once). Yes, it
933 * can happen! 933 * can happen!
934 */ 934 */
935 map_write(map, CMD(0xb0), adr); 935 map_write(map, CMD(0xb0), adr);
936 usec -= xip_elapsed_since(start); 936 usec -= xip_elapsed_since(start);
937 suspended = xip_currtime(); 937 suspended = xip_currtime();
938 do { 938 do {
939 if (xip_elapsed_since(suspended) > 100000) { 939 if (xip_elapsed_since(suspended) > 100000) {
940 /* 940 /*
941 * The chip doesn't want to suspend 941 * The chip doesn't want to suspend
942 * after waiting for 100 msecs. 942 * after waiting for 100 msecs.
943 * This is a critical error but there 943 * This is a critical error but there
944 * is not much we can do here. 944 * is not much we can do here.
945 */ 945 */
946 return; 946 return;
947 } 947 }
948 status = map_read(map, adr); 948 status = map_read(map, adr);
949 } while (!map_word_andequal(map, status, OK, OK)); 949 } while (!map_word_andequal(map, status, OK, OK));
950 950
951 /* Suspend succeeded */ 951 /* Suspend succeeded */
952 oldstate = chip->state; 952 oldstate = chip->state;
953 if (!map_word_bitsset(map, status, CMD(0x40))) 953 if (!map_word_bitsset(map, status, CMD(0x40)))
954 break; 954 break;
955 chip->state = FL_XIP_WHILE_ERASING; 955 chip->state = FL_XIP_WHILE_ERASING;
956 chip->erase_suspended = 1; 956 chip->erase_suspended = 1;
957 map_write(map, CMD(0xf0), adr); 957 map_write(map, CMD(0xf0), adr);
958 (void) map_read(map, adr); 958 (void) map_read(map, adr);
959 xip_iprefetch(); 959 xip_iprefetch();
960 local_irq_enable(); 960 local_irq_enable();
961 mutex_unlock(&chip->mutex); 961 mutex_unlock(&chip->mutex);
962 xip_iprefetch(); 962 xip_iprefetch();
963 cond_resched(); 963 cond_resched();
964 964
965 /* 965 /*
966 * We're back. However someone else might have 966 * We're back. However someone else might have
967 * decided to go write to the chip if we are in 967 * decided to go write to the chip if we are in
968 * a suspended erase state. If so let's wait 968 * a suspended erase state. If so let's wait
969 * until it's done. 969 * until it's done.
970 */ 970 */
971 mutex_lock(&chip->mutex); 971 mutex_lock(&chip->mutex);
972 while (chip->state != FL_XIP_WHILE_ERASING) { 972 while (chip->state != FL_XIP_WHILE_ERASING) {
973 DECLARE_WAITQUEUE(wait, current); 973 DECLARE_WAITQUEUE(wait, current);
974 set_current_state(TASK_UNINTERRUPTIBLE); 974 set_current_state(TASK_UNINTERRUPTIBLE);
975 add_wait_queue(&chip->wq, &wait); 975 add_wait_queue(&chip->wq, &wait);
976 mutex_unlock(&chip->mutex); 976 mutex_unlock(&chip->mutex);
977 schedule(); 977 schedule();
978 remove_wait_queue(&chip->wq, &wait); 978 remove_wait_queue(&chip->wq, &wait);
979 mutex_lock(&chip->mutex); 979 mutex_lock(&chip->mutex);
980 } 980 }
981 /* Disallow XIP again */ 981 /* Disallow XIP again */
982 local_irq_disable(); 982 local_irq_disable();
983 983
984 /* Correct Erase Suspend Hangups for M29EW */ 984 /* Correct Erase Suspend Hangups for M29EW */
985 cfi_fixup_m29ew_erase_suspend(map, adr); 985 cfi_fixup_m29ew_erase_suspend(map, adr);
986 /* Resume the write or erase operation */ 986 /* Resume the write or erase operation */
987 map_write(map, cfi->sector_erase_cmd, adr); 987 map_write(map, cfi->sector_erase_cmd, adr);
988 chip->state = oldstate; 988 chip->state = oldstate;
989 start = xip_currtime(); 989 start = xip_currtime();
990 } else if (usec >= 1000000/HZ) { 990 } else if (usec >= 1000000/HZ) {
991 /* 991 /*
992 * Try to save on CPU power when the waiting delay 992 * Try to save on CPU power when the waiting delay
993 * is at least a system timer tick period. 993 * is at least a system timer tick period.
994 * No need to be extremely accurate here. 994 * No need to be extremely accurate here.
995 */ 995 */
996 xip_cpu_idle(); 996 xip_cpu_idle();
997 } 997 }
998 status = map_read(map, adr); 998 status = map_read(map, adr);
999 } while (!map_word_andequal(map, status, OK, OK) 999 } while (!map_word_andequal(map, status, OK, OK)
1000 && xip_elapsed_since(start) < usec); 1000 && xip_elapsed_since(start) < usec);
1001 } 1001 }
1002 1002
1003 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) 1003 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
1004 1004
1005 /* 1005 /*
1006 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while 1006 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1007 * the flash is actively programming or erasing since we have to poll for 1007 * the flash is actively programming or erasing since we have to poll for
1008 * the operation to complete anyway. We can't do that in a generic way with 1008 * the operation to complete anyway. We can't do that in a generic way with
1009 * a XIP setup, so we do it before the actual flash operation in this case 1009 * a XIP setup, so we do it before the actual flash operation in this case
1010 * and stub it out of INVALIDATE_CACHE_UDELAY. 1010 * and stub it out of INVALIDATE_CACHE_UDELAY.
1011 */ 1011 */
1012 #define XIP_INVAL_CACHED_RANGE(map, from, size) \ 1012 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1013 INVALIDATE_CACHED_RANGE(map, from, size) 1013 INVALIDATE_CACHED_RANGE(map, from, size)
1014 1014
1015 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1015 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1016 UDELAY(map, chip, adr, usec) 1016 UDELAY(map, chip, adr, usec)
1017 1017
1018 /* 1018 /*
1019 * Extra notes: 1019 * Extra notes:
1020 * 1020 *
1021 * Activating this XIP support changes the way the code works a bit. For 1021 * Activating this XIP support changes the way the code works a bit. For
1022 * example the code to suspend the current process when concurrent access 1022 * example the code to suspend the current process when concurrent access
1023 * happens is never executed because xip_udelay() will always return with the 1023 * happens is never executed because xip_udelay() will always return with the
1024 * same chip state as it was entered with. This is why no special care is 1024 * same chip state as it was entered with. This is why no special care is
1025 * needed for the add_wait_queue() or schedule() calls within a couple of 1025 * needed for the add_wait_queue() or schedule() calls within a couple of
1026 * xip_disable()'d areas of code, like in do_erase_oneblock for example. 1026 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
1027 * The queueing and scheduling are always happening within xip_udelay(). 1027 * The queueing and scheduling are always happening within xip_udelay().
1028 * 1028 *
1029 * Similarly, get_chip() and put_chip() just happen to always be executed 1029 * Similarly, get_chip() and put_chip() just happen to always be executed
1030 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the 1030 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the
1031 * flash is in array mode, so many of the cases therein never execute 1031 * flash is in array mode, so many of the cases therein never execute
1032 * and XIP is never disturbed. 1032 * and XIP is never disturbed.
1033 */ 1033 */
1034 1034
1035 #else 1035 #else
1036 1036
1037 #define xip_disable(map, chip, adr) 1037 #define xip_disable(map, chip, adr)
1038 #define xip_enable(map, chip, adr) 1038 #define xip_enable(map, chip, adr)
1039 #define XIP_INVAL_CACHED_RANGE(x...) 1039 #define XIP_INVAL_CACHED_RANGE(x...)
1040 1040
1041 #define UDELAY(map, chip, adr, usec) \ 1041 #define UDELAY(map, chip, adr, usec) \
1042 do { \ 1042 do { \
1043 mutex_unlock(&chip->mutex); \ 1043 mutex_unlock(&chip->mutex); \
1044 cfi_udelay(usec); \ 1044 cfi_udelay(usec); \
1045 mutex_lock(&chip->mutex); \ 1045 mutex_lock(&chip->mutex); \
1046 } while (0) 1046 } while (0)
1047 1047
1048 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1048 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1049 do { \ 1049 do { \
1050 mutex_unlock(&chip->mutex); \ 1050 mutex_unlock(&chip->mutex); \
1051 INVALIDATE_CACHED_RANGE(map, adr, len); \ 1051 INVALIDATE_CACHED_RANGE(map, adr, len); \
1052 cfi_udelay(usec); \ 1052 cfi_udelay(usec); \
1053 mutex_lock(&chip->mutex); \ 1053 mutex_lock(&chip->mutex); \
1054 } while (0) 1054 } while (0)
1055 1055
1056 #endif 1056 #endif
1057 1057
1058 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1058 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1059 { 1059 {
1060 unsigned long cmd_addr; 1060 unsigned long cmd_addr;
1061 struct cfi_private *cfi = map->fldrv_priv; 1061 struct cfi_private *cfi = map->fldrv_priv;
1062 int ret; 1062 int ret;
1063 1063
1064 adr += chip->start; 1064 adr += chip->start;
1065 1065
1066 /* Ensure cmd read/writes are aligned. */ 1066 /* Ensure cmd read/writes are aligned. */
1067 cmd_addr = adr & ~(map_bankwidth(map)-1); 1067 cmd_addr = adr & ~(map_bankwidth(map)-1);
1068 1068
1069 mutex_lock(&chip->mutex); 1069 mutex_lock(&chip->mutex);
1070 ret = get_chip(map, chip, cmd_addr, FL_READY); 1070 ret = get_chip(map, chip, cmd_addr, FL_READY);
1071 if (ret) { 1071 if (ret) {
1072 mutex_unlock(&chip->mutex); 1072 mutex_unlock(&chip->mutex);
1073 return ret; 1073 return ret;
1074 } 1074 }
1075 1075
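	/* Not in array (read) mode: issue the plain 0xF0 reset first */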
1076 if (chip->state != FL_POINT && chip->state != FL_READY) { 1076 if (chip->state != FL_POINT && chip->state != FL_READY) {
1077 map_write(map, CMD(0xf0), cmd_addr); 1077 map_write(map, CMD(0xf0), cmd_addr);
1078 chip->state = FL_READY; 1078 chip->state = FL_READY;
1079 } 1079 }
1080 1080
1081 map_copy_from(map, buf, adr, len); 1081 map_copy_from(map, buf, adr, len);
1082 1082
1083 put_chip(map, chip, cmd_addr); 1083 put_chip(map, chip, cmd_addr);
1084 1084
1085 mutex_unlock(&chip->mutex); 1085 mutex_unlock(&chip->mutex);
1086 return 0; 1086 return 0;
1087 } 1087 }
1088 1088
1089 1089
1090 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1090 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1091 { 1091 {
1092 struct map_info *map = mtd->priv; 1092 struct map_info *map = mtd->priv;
1093 struct cfi_private *cfi = map->fldrv_priv; 1093 struct cfi_private *cfi = map->fldrv_priv;
1094 unsigned long ofs; 1094 unsigned long ofs;
1095 int chipnum; 1095 int chipnum;
1096 int ret = 0; 1096 int ret = 0;
1097 1097
1098 /* ofs: offset within the first chip at which the first read should start */ 1098 /* ofs: offset within the first chip at which the first read should start */
1099 chipnum = (from >> cfi->chipshift); 1099 chipnum = (from >> cfi->chipshift);
1100 ofs = from - (chipnum << cfi->chipshift); 1100 ofs = from - (chipnum << cfi->chipshift);
1101 1101
1102 while (len) { 1102 while (len) {
1103 unsigned long thislen; 1103 unsigned long thislen;
1104 1104
1105 if (chipnum >= cfi->numchips) 1105 if (chipnum >= cfi->numchips)
1106 break; 1106 break;
1107 1107
1108 if ((len + ofs -1) >> cfi->chipshift) 1108 if ((len + ofs -1) >> cfi->chipshift)
1109 thislen = (1<<cfi->chipshift) - ofs; 1109 thislen = (1<<cfi->chipshift) - ofs;
1110 else 1110 else
1111 thislen = len; 1111 thislen = len;
1112 1112
1113 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1113 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1114 if (ret) 1114 if (ret)
1115 break; 1115 break;
1116 1116
1117 *retlen += thislen; 1117 *retlen += thislen;
1118 len -= thislen; 1118 len -= thislen;
1119 buf += thislen; 1119 buf += thislen;
1120 1120
1121 ofs = 0; 1121 ofs = 0;
1122 chipnum++; 1122 chipnum++;
1123 } 1123 }
1124 return ret; 1124 return ret;
1125 } 1125 }
1126 1126
1127 1127
1128 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1128 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1129 { 1129 {
1130 DECLARE_WAITQUEUE(wait, current); 1130 DECLARE_WAITQUEUE(wait, current);
1131 unsigned long timeo = jiffies + HZ; 1131 unsigned long timeo = jiffies + HZ;
1132 struct cfi_private *cfi = map->fldrv_priv; 1132 struct cfi_private *cfi = map->fldrv_priv;
1133 1133
1134 retry: 1134 retry:
1135 mutex_lock(&chip->mutex); 1135 mutex_lock(&chip->mutex);
1136 1136
1137 if (chip->state != FL_READY){ 1137 if (chip->state != FL_READY){
1138 set_current_state(TASK_UNINTERRUPTIBLE); 1138 set_current_state(TASK_UNINTERRUPTIBLE);
1139 add_wait_queue(&chip->wq, &wait); 1139 add_wait_queue(&chip->wq, &wait);
1140 1140
1141 mutex_unlock(&chip->mutex); 1141 mutex_unlock(&chip->mutex);
1142 1142
1143 schedule(); 1143 schedule();
1144 remove_wait_queue(&chip->wq, &wait); 1144 remove_wait_queue(&chip->wq, &wait);
1145 timeo = jiffies + HZ; 1145 timeo = jiffies + HZ;
1146 1146
1147 goto retry; 1147 goto retry;
1148 } 1148 }
1149 1149
1150 adr += chip->start; 1150 adr += chip->start;
1151 1151
1152 chip->state = FL_READY; 1152 chip->state = FL_READY;
1153 1153
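	/* The unlock cycles followed by 0x88 switch the chip into the SecSi
	 * (security silicon) region, so the read below returns SecSi data. */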
1154 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1154 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1155 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1155 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1156 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1156 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1157 1157
1158 map_copy_from(map, buf, adr, len); 1158 map_copy_from(map, buf, adr, len);
1159 1159
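	/* Exit SecSi: unlock cycles, 0x90, then 0x00 return the chip to the
	 * normal array. */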
1160 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1160 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1161 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1161 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1162 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1162 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1163 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1163 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1164 1164
1165 wake_up(&chip->wq); 1165 wake_up(&chip->wq);
1166 mutex_unlock(&chip->mutex); 1166 mutex_unlock(&chip->mutex);
1167 1167
1168 return 0; 1168 return 0;
1169 } 1169 }
1170 1170
1171 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1171 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1172 { 1172 {
1173 struct map_info *map = mtd->priv; 1173 struct map_info *map = mtd->priv;
1174 struct cfi_private *cfi = map->fldrv_priv; 1174 struct cfi_private *cfi = map->fldrv_priv;
1175 unsigned long ofs; 1175 unsigned long ofs;
1176 int chipnum; 1176 int chipnum;
1177 int ret = 0; 1177 int ret = 0;
1178 1178
1179 /* ofs: offset within the first chip at which the first read should start */ 1179 /* ofs: offset within the first chip at which the first read should start */
1180 /* 8 secsi bytes per chip */ 1180 /* 8 secsi bytes per chip */
1181 chipnum=from>>3; 1181 chipnum=from>>3;
1182 ofs=from & 7; 1182 ofs=from & 7;
1183 1183
1184 while (len) { 1184 while (len) {
1185 unsigned long thislen; 1185 unsigned long thislen;
1186 1186
1187 if (chipnum >= cfi->numchips) 1187 if (chipnum >= cfi->numchips)
1188 break; 1188 break;
1189 1189
1190 if ((len + ofs -1) >> 3) 1190 if ((len + ofs -1) >> 3)
1191 thislen = (1<<3) - ofs; 1191 thislen = (1<<3) - ofs;
1192 else 1192 else
1193 thislen = len; 1193 thislen = len;
1194 1194
1195 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1195 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1196 if (ret) 1196 if (ret)
1197 break; 1197 break;
1198 1198
1199 *retlen += thislen; 1199 *retlen += thislen;
1200 len -= thislen; 1200 len -= thislen;
1201 buf += thislen; 1201 buf += thislen;
1202 1202
1203 ofs = 0; 1203 ofs = 0;
1204 chipnum++; 1204 chipnum++;
1205 } 1205 }
1206 return ret; 1206 return ret;
1207 } 1207 }
1208 1208
1209 1209
1210 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum) 1210 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
1211 { 1211 {
1212 struct cfi_private *cfi = map->fldrv_priv; 1212 struct cfi_private *cfi = map->fldrv_priv;
1213 unsigned long timeo = jiffies + HZ; 1213 unsigned long timeo = jiffies + HZ;
1214 /* 1214 /*
1215 * We use a 1ms + 1 jiffies generic timeout for writes (most devices 1215 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1216 * have a max write time of a few hundred usecs). However, we should 1216 * have a max write time of a few hundred usecs). However, we should
1217 * use the maximum timeout value given by the chip at probe time 1217 * use the maximum timeout value given by the chip at probe time
1218 * instead. Unfortunately, struct flchip does not have a field for 1218 * instead. Unfortunately, struct flchip does not have a field for
1219 * the maximum timeout, only for the typical, which can be far too short 1219 * the maximum timeout, only for the typical, which can be far too short
1220 * depending on the conditions. The ' + 1' is to avoid having a 1220 * depending on the conditions. The ' + 1' is to avoid having a
1221 * timeout of 0 jiffies if HZ is smaller than 1000. 1221 * timeout of 0 jiffies if HZ is smaller than 1000.
1222 */ 1222 */
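	/* Illustration (assumed HZ values, not from any datasheet): with
	 * HZ == 1000 this gives 1 + 1 == 2 jiffies (2 ms); with HZ == 100,
	 * integer division gives 0 + 1 == 1 jiffy (10 ms). */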
1223 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; 1223 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1224 int ret = 0; 1224 int ret = 0;
1225 map_word oldd; 1225 map_word oldd;
1226 int retry_cnt = 0; 1226 int retry_cnt = 0;
1227 1227
1228 adr += chip->start; 1228 adr += chip->start;
1229 1229
1230 mutex_lock(&chip->mutex); 1230 mutex_lock(&chip->mutex);
1231 ret = get_chip(map, chip, adr, FL_WRITING); 1231 ret = get_chip(map, chip, adr, FL_WRITING);
1232 if (ret) { 1232 if (ret) {
1233 mutex_unlock(&chip->mutex); 1233 mutex_unlock(&chip->mutex);
1234 return ret; 1234 return ret;
1235 } 1235 }
1236 1236
1237 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1237 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1238 __func__, adr, datum.x[0] ); 1238 __func__, adr, datum.x[0] );
1239 1239
1240 /* 1240 /*
1241 * Check for a NOP for the case when the datum to write is already 1241 * Check for a NOP for the case when the datum to write is already
1242 * present - it saves time and works around buggy chips that corrupt 1242 * present - it saves time and works around buggy chips that corrupt
1243 * data at other locations when 0xff is written to a location that 1243 * data at other locations when 0xff is written to a location that
1244 * already contains 0xff. 1244 * already contains 0xff.
1245 */ 1245 */
1246 oldd = map_read(map, adr); 1246 oldd = map_read(map, adr);
1247 if (map_word_equal(map, oldd, datum)) { 1247 if (map_word_equal(map, oldd, datum)) {
1248 pr_debug("MTD %s(): NOP\n", 1248 pr_debug("MTD %s(): NOP\n",
1249 __func__); 1249 __func__);
1250 goto op_done; 1250 goto op_done;
1251 } 1251 }
1252 1252
1253 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); 1253 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1254 ENABLE_VPP(map); 1254 ENABLE_VPP(map);
1255 xip_disable(map, chip, adr); 1255 xip_disable(map, chip, adr);
1256 retry: 1256 retry:
1257 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1257 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1258 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1258 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1259 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1259 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1260 map_write(map, datum, adr); 1260 map_write(map, datum, adr);
1261 chip->state = FL_WRITING; 1261 chip->state = FL_WRITING;
1262 1262
1263 INVALIDATE_CACHE_UDELAY(map, chip, 1263 INVALIDATE_CACHE_UDELAY(map, chip,
1264 adr, map_bankwidth(map), 1264 adr, map_bankwidth(map),
1265 chip->word_write_time); 1265 chip->word_write_time);
1266 1266
1267 /* See comment above for timeout value. */ 1267 /* See comment above for timeout value. */
1268 timeo = jiffies + uWriteTimeout; 1268 timeo = jiffies + uWriteTimeout;
1269 for (;;) { 1269 for (;;) {
1270 if (chip->state != FL_WRITING) { 1270 if (chip->state != FL_WRITING) {
1271 /* Someone's suspended the write. Sleep */ 1271 /* Someone's suspended the write. Sleep */
1272 DECLARE_WAITQUEUE(wait, current); 1272 DECLARE_WAITQUEUE(wait, current);
1273 1273
1274 set_current_state(TASK_UNINTERRUPTIBLE); 1274 set_current_state(TASK_UNINTERRUPTIBLE);
1275 add_wait_queue(&chip->wq, &wait); 1275 add_wait_queue(&chip->wq, &wait);
1276 mutex_unlock(&chip->mutex); 1276 mutex_unlock(&chip->mutex);
1277 schedule(); 1277 schedule();
1278 remove_wait_queue(&chip->wq, &wait); 1278 remove_wait_queue(&chip->wq, &wait);
1279 timeo = jiffies + (HZ / 2); /* FIXME */ 1279 timeo = jiffies + (HZ / 2); /* FIXME */
1280 mutex_lock(&chip->mutex); 1280 mutex_lock(&chip->mutex);
1281 continue; 1281 continue;
1282 } 1282 }
1283 1283
1284 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){ 1284 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1285 xip_enable(map, chip, adr); 1285 xip_enable(map, chip, adr);
1286 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 1286 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1287 xip_disable(map, chip, adr); 1287 xip_disable(map, chip, adr);
1288 break; 1288 break;
1289 } 1289 }
1290 1290
1291 if (chip_ready(map, adr)) 1291 if (chip_ready(map, adr))
1292 break; 1292 break;
1293 1293
1294 /* Latency issues. Drop the lock, wait a while and retry */ 1294 /* Latency issues. Drop the lock, wait a while and retry */
1295 UDELAY(map, chip, adr, 1); 1295 UDELAY(map, chip, adr, 1);
1296 } 1296 }
1297 /* Did we succeed? */ 1297 /* Did we succeed? */
1298 if (!chip_good(map, adr, datum)) { 1298 if (!chip_good(map, adr, datum)) {
1299 /* reset on all failures. */ 1299 /* reset on all failures. */
1300 map_write( map, CMD(0xF0), chip->start ); 1300 map_write( map, CMD(0xF0), chip->start );
1301 /* FIXME - should have reset delay before continuing */ 1301 /* FIXME - should have reset delay before continuing */
1302 1302
1303 if (++retry_cnt <= MAX_WORD_RETRIES) 1303 if (++retry_cnt <= MAX_WORD_RETRIES)
1304 goto retry; 1304 goto retry;
1305 1305
1306 ret = -EIO; 1306 ret = -EIO;
1307 } 1307 }
1308 xip_enable(map, chip, adr); 1308 xip_enable(map, chip, adr);
1309 op_done: 1309 op_done:
1310 chip->state = FL_READY; 1310 chip->state = FL_READY;
1311 DISABLE_VPP(map); 1311 DISABLE_VPP(map);
1312 put_chip(map, chip, adr); 1312 put_chip(map, chip, adr);
1313 mutex_unlock(&chip->mutex); 1313 mutex_unlock(&chip->mutex);
1314 1314
1315 return ret; 1315 return ret;
1316 } 1316 }
1317 1317
1318 1318
1319 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, 1319 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1320 size_t *retlen, const u_char *buf) 1320 size_t *retlen, const u_char *buf)
1321 { 1321 {
1322 struct map_info *map = mtd->priv; 1322 struct map_info *map = mtd->priv;
1323 struct cfi_private *cfi = map->fldrv_priv; 1323 struct cfi_private *cfi = map->fldrv_priv;
1324 int ret = 0; 1324 int ret = 0;
1325 int chipnum; 1325 int chipnum;
1326 unsigned long ofs, chipstart; 1326 unsigned long ofs, chipstart;
1327 DECLARE_WAITQUEUE(wait, current); 1327 DECLARE_WAITQUEUE(wait, current);
1328 1328
1329 chipnum = to >> cfi->chipshift; 1329 chipnum = to >> cfi->chipshift;
1330 ofs = to - (chipnum << cfi->chipshift); 1330 ofs = to - (chipnum << cfi->chipshift);
1331 chipstart = cfi->chips[chipnum].start; 1331 chipstart = cfi->chips[chipnum].start;
1332 1332
1333 /* If it's not bus-aligned, do the first byte write */ 1333 /* If it's not bus-aligned, do the first byte write */
1334 if (ofs & (map_bankwidth(map)-1)) { 1334 if (ofs & (map_bankwidth(map)-1)) {
1335 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); 1335 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1336 int i = ofs - bus_ofs; 1336 int i = ofs - bus_ofs;
1337 int n = 0; 1337 int n = 0;
1338 map_word tmp_buf; 1338 map_word tmp_buf;
1339 1339
1340 retry: 1340 retry:
1341 mutex_lock(&cfi->chips[chipnum].mutex); 1341 mutex_lock(&cfi->chips[chipnum].mutex);
1342 1342
1343 if (cfi->chips[chipnum].state != FL_READY) { 1343 if (cfi->chips[chipnum].state != FL_READY) {
1344 set_current_state(TASK_UNINTERRUPTIBLE); 1344 set_current_state(TASK_UNINTERRUPTIBLE);
1345 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1345 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1346 1346
1347 mutex_unlock(&cfi->chips[chipnum].mutex); 1347 mutex_unlock(&cfi->chips[chipnum].mutex);
1348 1348
1349 schedule(); 1349 schedule();
1350 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1350 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1351 goto retry; 1351 goto retry;
1352 } 1352 }
1353 1353
1354 /* Load 'tmp_buf' with old contents of flash */ 1354 /* Load 'tmp_buf' with old contents of flash */
1355 tmp_buf = map_read(map, bus_ofs+chipstart); 1355 tmp_buf = map_read(map, bus_ofs+chipstart);
1356 1356
1357 mutex_unlock(&cfi->chips[chipnum].mutex); 1357 mutex_unlock(&cfi->chips[chipnum].mutex);
1358 1358
1359 /* Number of bytes to copy from buffer */ 1359 /* Number of bytes to copy from buffer */
1360 n = min_t(int, len, map_bankwidth(map)-i); 1360 n = min_t(int, len, map_bankwidth(map)-i);
1361 1361
1362 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1362 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1363 1363
1364 ret = do_write_oneword(map, &cfi->chips[chipnum], 1364 ret = do_write_oneword(map, &cfi->chips[chipnum],
1365 bus_ofs, tmp_buf); 1365 bus_ofs, tmp_buf);
1366 if (ret) 1366 if (ret)
1367 return ret; 1367 return ret;
1368 1368
1369 ofs += n; 1369 ofs += n;
1370 buf += n; 1370 buf += n;
1371 (*retlen) += n; 1371 (*retlen) += n;
1372 len -= n; 1372 len -= n;
1373 1373
1374 if (ofs >> cfi->chipshift) { 1374 if (ofs >> cfi->chipshift) {
1375 chipnum ++; 1375 chipnum ++;
1376 ofs = 0; 1376 ofs = 0;
1377 if (chipnum == cfi->numchips) 1377 if (chipnum == cfi->numchips)
1378 return 0; 1378 return 0;
1379 } 1379 }
1380 } 1380 }
1381 1381
1382 /* We are now aligned, write as much as possible */ 1382 /* We are now aligned, write as much as possible */
1383 while(len >= map_bankwidth(map)) { 1383 while(len >= map_bankwidth(map)) {
1384 map_word datum; 1384 map_word datum;
1385 1385
1386 datum = map_word_load(map, buf); 1386 datum = map_word_load(map, buf);
1387 1387
1388 ret = do_write_oneword(map, &cfi->chips[chipnum], 1388 ret = do_write_oneword(map, &cfi->chips[chipnum],
1389 ofs, datum); 1389 ofs, datum);
1390 if (ret) 1390 if (ret)
1391 return ret; 1391 return ret;
1392 1392
1393 ofs += map_bankwidth(map); 1393 ofs += map_bankwidth(map);
1394 buf += map_bankwidth(map); 1394 buf += map_bankwidth(map);
1395 (*retlen) += map_bankwidth(map); 1395 (*retlen) += map_bankwidth(map);
1396 len -= map_bankwidth(map); 1396 len -= map_bankwidth(map);
1397 1397
1398 if (ofs >> cfi->chipshift) { 1398 if (ofs >> cfi->chipshift) {
1399 chipnum ++; 1399 chipnum ++;
1400 ofs = 0; 1400 ofs = 0;
1401 if (chipnum == cfi->numchips) 1401 if (chipnum == cfi->numchips)
1402 return 0; 1402 return 0;
1403 chipstart = cfi->chips[chipnum].start; 1403 chipstart = cfi->chips[chipnum].start;
1404 } 1404 }
1405 } 1405 }
1406 1406
1407 /* Write the trailing bytes if any */ 1407 /* Write the trailing bytes if any */
1408 if (len & (map_bankwidth(map)-1)) { 1408 if (len & (map_bankwidth(map)-1)) {
1409 map_word tmp_buf; 1409 map_word tmp_buf;
1410 1410
1411 retry1: 1411 retry1:
1412 mutex_lock(&cfi->chips[chipnum].mutex); 1412 mutex_lock(&cfi->chips[chipnum].mutex);
1413 1413
1414 if (cfi->chips[chipnum].state != FL_READY) { 1414 if (cfi->chips[chipnum].state != FL_READY) {
1415 set_current_state(TASK_UNINTERRUPTIBLE); 1415 set_current_state(TASK_UNINTERRUPTIBLE);
1416 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1416 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1417 1417
1418 mutex_unlock(&cfi->chips[chipnum].mutex); 1418 mutex_unlock(&cfi->chips[chipnum].mutex);
1419 1419
1420 schedule(); 1420 schedule();
1421 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1421 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1422 goto retry1; 1422 goto retry1;
1423 } 1423 }
1424 1424
1425 tmp_buf = map_read(map, ofs + chipstart); 1425 tmp_buf = map_read(map, ofs + chipstart);
1426 1426
1427 mutex_unlock(&cfi->chips[chipnum].mutex); 1427 mutex_unlock(&cfi->chips[chipnum].mutex);
1428 1428
1429 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1429 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1430 1430
1431 ret = do_write_oneword(map, &cfi->chips[chipnum], 1431 ret = do_write_oneword(map, &cfi->chips[chipnum],
1432 ofs, tmp_buf); 1432 ofs, tmp_buf);
1433 if (ret) 1433 if (ret)
1434 return ret; 1434 return ret;
1435 1435
1436 (*retlen) += len; 1436 (*retlen) += len;
1437 } 1437 }
1438 1438
1439 return 0; 1439 return 0;
1440 } 1440 }
1441 1441
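To make the unaligned-head arithmetic concrete, a worked example with assumed
values (map_bankwidth(map) == 2, a write starting at ofs == 5):

	/* bus_ofs = 5 & ~1 == 4     (aligned word containing the start)
	 * i       = 5 - 4  == 1     (byte position inside that word)
	 * n       = min(len, 2 - 1) (== 1 byte merged via map_word_load_partial())
	 * The merged word is then programmed in place by do_write_oneword(). */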
1442 1442
1443 /* 1443 /*
1444 * FIXME: interleaved mode not tested, and probably not supported! 1444 * FIXME: interleaved mode not tested, and probably not supported!
1445 */ 1445 */
1446 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 1446 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1447 unsigned long adr, const u_char *buf, 1447 unsigned long adr, const u_char *buf,
1448 int len) 1448 int len)
1449 { 1449 {
1450 struct cfi_private *cfi = map->fldrv_priv; 1450 struct cfi_private *cfi = map->fldrv_priv;
1451 unsigned long timeo = jiffies + HZ; 1451 unsigned long timeo = jiffies + HZ;
1452 /* see comments in do_write_oneword() regarding uWriteTimeout. */ 1452 /* see comments in do_write_oneword() regarding uWriteTimeout. */
1453 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; 1453 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1454 int ret = -EIO; 1454 int ret = -EIO;
1455 unsigned long cmd_adr; 1455 unsigned long cmd_adr;
1456 int z, words; 1456 int z, words;
1457 map_word datum; 1457 map_word datum;
1458 1458
1459 adr += chip->start; 1459 adr += chip->start;
1460 cmd_adr = adr; 1460 cmd_adr = adr;
1461 1461
1462 mutex_lock(&chip->mutex); 1462 mutex_lock(&chip->mutex);
1463 ret = get_chip(map, chip, adr, FL_WRITING); 1463 ret = get_chip(map, chip, adr, FL_WRITING);
1464 if (ret) { 1464 if (ret) {
1465 mutex_unlock(&chip->mutex); 1465 mutex_unlock(&chip->mutex);
1466 return ret; 1466 return ret;
1467 } 1467 }
1468 1468
1469 datum = map_word_load(map, buf); 1469 datum = map_word_load(map, buf);
1470 1470
1471 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1471 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1472 __func__, adr, datum.x[0] ); 1472 __func__, adr, datum.x[0] );
1473 1473
1474 XIP_INVAL_CACHED_RANGE(map, adr, len); 1474 XIP_INVAL_CACHED_RANGE(map, adr, len);
1475 ENABLE_VPP(map); 1475 ENABLE_VPP(map);
1476 xip_disable(map, chip, cmd_adr); 1476 xip_disable(map, chip, cmd_adr);
1477 1477
1478 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1478 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1479 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1479 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1480 1480
1481 /* Write Buffer Load */ 1481 /* Write Buffer Load */
1482 map_write(map, CMD(0x25), cmd_adr); 1482 map_write(map, CMD(0x25), cmd_adr);
1483 1483
1484 chip->state = FL_WRITING_TO_BUFFER; 1484 chip->state = FL_WRITING_TO_BUFFER;
1485 1485
1486 /* Write length of data to come */ 1486 /* Write length of data to come */
1487 words = len / map_bankwidth(map); 1487 words = len / map_bankwidth(map);
1488 map_write(map, CMD(words - 1), cmd_adr); 1488 map_write(map, CMD(words - 1), cmd_adr);
1489 /* Write data */ 1489 /* Write data */
1490 z = 0; 1490 z = 0;
1491 while(z < words * map_bankwidth(map)) { 1491 while(z < words * map_bankwidth(map)) {
1492 datum = map_word_load(map, buf); 1492 datum = map_word_load(map, buf);
1493 map_write(map, datum, adr + z); 1493 map_write(map, datum, adr + z);
1494 1494
1495 z += map_bankwidth(map); 1495 z += map_bankwidth(map);
1496 buf += map_bankwidth(map); 1496 buf += map_bankwidth(map);
1497 } 1497 }
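		/* Step z back to the offset of the last word written; adr then
		 * points at that word, which the ready polling below samples. */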
1498 z -= map_bankwidth(map); 1498 z -= map_bankwidth(map);
1499 1499
1500 adr += z; 1500 adr += z;
1501 1501
1502 /* Write Buffer Program Confirm: GO GO GO */ 1502 /* Write Buffer Program Confirm: GO GO GO */
1503 map_write(map, CMD(0x29), cmd_adr); 1503 map_write(map, CMD(0x29), cmd_adr);
1504 chip->state = FL_WRITING; 1504 chip->state = FL_WRITING;
1505 1505
1506 INVALIDATE_CACHE_UDELAY(map, chip, 1506 INVALIDATE_CACHE_UDELAY(map, chip,
1507 adr, map_bankwidth(map), 1507 adr, map_bankwidth(map),
1508 chip->word_write_time); 1508 chip->word_write_time);
1509 1509
1510 timeo = jiffies + uWriteTimeout; 1510 timeo = jiffies + uWriteTimeout;
1511 1511
1512 for (;;) { 1512 for (;;) {
1513 if (chip->state != FL_WRITING) { 1513 if (chip->state != FL_WRITING) {
1514 /* Someone's suspended the write. Sleep */ 1514 /* Someone's suspended the write. Sleep */
1515 DECLARE_WAITQUEUE(wait, current); 1515 DECLARE_WAITQUEUE(wait, current);
1516 1516
1517 set_current_state(TASK_UNINTERRUPTIBLE); 1517 set_current_state(TASK_UNINTERRUPTIBLE);
1518 add_wait_queue(&chip->wq, &wait); 1518 add_wait_queue(&chip->wq, &wait);
1519 mutex_unlock(&chip->mutex); 1519 mutex_unlock(&chip->mutex);
1520 schedule(); 1520 schedule();
1521 remove_wait_queue(&chip->wq, &wait); 1521 remove_wait_queue(&chip->wq, &wait);
1522 timeo = jiffies + (HZ / 2); /* FIXME */ 1522 timeo = jiffies + (HZ / 2); /* FIXME */
1523 mutex_lock(&chip->mutex); 1523 mutex_lock(&chip->mutex);
1524 continue; 1524 continue;
1525 } 1525 }
1526 1526
1527 if (time_after(jiffies, timeo) && !chip_ready(map, adr)) 1527 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1528 break; 1528 break;
1529 1529
1530 if (chip_ready(map, adr)) { 1530 if (chip_ready(map, adr)) {
1531 xip_enable(map, chip, adr); 1531 xip_enable(map, chip, adr);
1532 goto op_done; 1532 goto op_done;
1533 } 1533 }
1534 1534
1535 /* Latency issues. Drop the lock, wait a while and retry */ 1535 /* Latency issues. Drop the lock, wait a while and retry */
1536 UDELAY(map, chip, adr, 1); 1536 UDELAY(map, chip, adr, 1);
1537 } 1537 }
1538 1538
1539 /* reset on all failures. */ 1539 /*
1540 map_write( map, CMD(0xF0), chip->start ); 1540 * Recovery from write-buffer programming failures requires
1541 * the write-to-buffer-reset sequence. Since the last part
1542 * of the sequence also works as a normal reset, we can run
1543 * the same commands regardless of why we are here.
1544 * See e.g.
1545 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
1546 */
1547 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1548 cfi->device_type, NULL);
1549 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1550 cfi->device_type, NULL);
1551 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
1552 cfi->device_type, NULL);
1541 xip_enable(map, chip, adr); 1553 xip_enable(map, chip, adr);
1542 /* FIXME - should have reset delay before continuing */ 1554 /* FIXME - should have reset delay before continuing */
1543 1555
1544 printk(KERN_WARNING "MTD %s(): software timeout\n", 1556 printk(KERN_WARNING "MTD %s(): software timeout\n",
1545 __func__ ); 1557 __func__ );
1546 1558
1547 ret = -EIO; 1559 ret = -EIO;
1548 op_done: 1560 op_done:
1549 chip->state = FL_READY; 1561 chip->state = FL_READY;
1550 DISABLE_VPP(map); 1562 DISABLE_VPP(map);
1551 put_chip(map, chip, adr); 1563 put_chip(map, chip, adr);
1552 mutex_unlock(&chip->mutex); 1564 mutex_unlock(&chip->mutex);
1553 1565
1554 return ret; 1566 return ret;
1555 } 1567 }
1556 1568
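The timeout path above is the substance of this commit: a failed write-buffer
operation is not cleared by the plain 0xF0 reset, so the full
write-to-buffer-reset sequence is issued instead. The same sequence is shown
below as a standalone sketch (the helper name is hypothetical; the commit
open-codes the three commands):

	static void example_write_to_buffer_reset(struct map_info *map,
						  struct cfi_private *cfi,
						  unsigned long chip_start)
	{
		cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip_start, map, cfi,
				 cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip_start, map, cfi,
				 cfi->device_type, NULL);
		/* Datasheets vary on whether the final 0xF0 may go to any
		 * address or must go to addr_unlock1; writing it to
		 * addr_unlock1 satisfies both variants. */
		cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip_start, map, cfi,
				 cfi->device_type, NULL);
	}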
1557 1569
1558 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 1570 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1559 size_t *retlen, const u_char *buf) 1571 size_t *retlen, const u_char *buf)
1560 { 1572 {
1561 struct map_info *map = mtd->priv; 1573 struct map_info *map = mtd->priv;
1562 struct cfi_private *cfi = map->fldrv_priv; 1574 struct cfi_private *cfi = map->fldrv_priv;
1563 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1575 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1564 int ret = 0; 1576 int ret = 0;
1565 int chipnum; 1577 int chipnum;
1566 unsigned long ofs; 1578 unsigned long ofs;
1567 1579
1568 chipnum = to >> cfi->chipshift; 1580 chipnum = to >> cfi->chipshift;
1569 ofs = to - (chipnum << cfi->chipshift); 1581 ofs = to - (chipnum << cfi->chipshift);
1570 1582
1571 /* If it's not bus-aligned, do the first word write */ 1583 /* If it's not bus-aligned, do the first word write */
1572 if (ofs & (map_bankwidth(map)-1)) { 1584 if (ofs & (map_bankwidth(map)-1)) {
1573 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 1585 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1574 if (local_len > len) 1586 if (local_len > len)
1575 local_len = len; 1587 local_len = len;
1576 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1588 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1577 local_len, retlen, buf); 1589 local_len, retlen, buf);
1578 if (ret) 1590 if (ret)
1579 return ret; 1591 return ret;
1580 ofs += local_len; 1592 ofs += local_len;
1581 buf += local_len; 1593 buf += local_len;
1582 len -= local_len; 1594 len -= local_len;
1583 1595
1584 if (ofs >> cfi->chipshift) { 1596 if (ofs >> cfi->chipshift) {
1585 chipnum ++; 1597 chipnum ++;
1586 ofs = 0; 1598 ofs = 0;
1587 if (chipnum == cfi->numchips) 1599 if (chipnum == cfi->numchips)
1588 return 0; 1600 return 0;
1589 } 1601 }
1590 } 1602 }
1591 1603
1592 /* Write buffer is worth it only if there is more than one word to write... */ 1604 /* Write buffer is worth it only if there is more than one word to write... */
1593 while (len >= map_bankwidth(map) * 2) { 1605 while (len >= map_bankwidth(map) * 2) {
1594 /* We must not cross write block boundaries */ 1606 /* We must not cross write block boundaries */
1595 int size = wbufsize - (ofs & (wbufsize-1)); 1607 int size = wbufsize - (ofs & (wbufsize-1));
1596 1608
1597 if (size > len) 1609 if (size > len)
1598 size = len; 1610 size = len;
1599 if (size % map_bankwidth(map)) 1611 if (size % map_bankwidth(map))
1600 size -= size % map_bankwidth(map); 1612 size -= size % map_bankwidth(map);
1601 1613
1602 ret = do_write_buffer(map, &cfi->chips[chipnum], 1614 ret = do_write_buffer(map, &cfi->chips[chipnum],
1603 ofs, buf, size); 1615 ofs, buf, size);
1604 if (ret) 1616 if (ret)
1605 return ret; 1617 return ret;
1606 1618
1607 ofs += size; 1619 ofs += size;
1608 buf += size; 1620 buf += size;
1609 (*retlen) += size; 1621 (*retlen) += size;
1610 len -= size; 1622 len -= size;
1611 1623
1612 if (ofs >> cfi->chipshift) { 1624 if (ofs >> cfi->chipshift) {
1613 chipnum ++; 1625 chipnum ++;
1614 ofs = 0; 1626 ofs = 0;
1615 if (chipnum == cfi->numchips) 1627 if (chipnum == cfi->numchips)
1616 return 0; 1628 return 0;
1617 } 1629 }
1618 } 1630 }
1619 1631
1620 if (len) { 1632 if (len) {
1621 size_t retlen_dregs = 0; 1633 size_t retlen_dregs = 0;
1622 1634
1623 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1635 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1624 len, &retlen_dregs, buf); 1636 len, &retlen_dregs, buf);
1625 1637
1626 *retlen += retlen_dregs; 1638 *retlen += retlen_dregs;
1627 return ret; 1639 return ret;
1628 } 1640 }
1629 1641
1630 return 0; 1642 return 0;
1631 } 1643 }
1632 1644
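A worked example of the block-boundary chunking above, with assumed numbers
(wbufsize of 32 bytes, a 100-byte write starting at offset 0x1c):

	/* First pass:  size = 32 - (0x1c & 31) == 4  -> write up to the boundary */
	/* Next passes: size == 32, 32, 32            -> full write buffers       */
	/* 4 + 32 + 32 + 32 == 100, so len reaches 0 and the loop exits           */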
1633 /* 1645 /*
1634 * Wait for the flash chip to become ready to write data 1646 * Wait for the flash chip to become ready to write data
1635 * 1647 *
1636 * This is only called during the panic_write() path. When panic_write() 1648 * This is only called during the panic_write() path. When panic_write()
1637 * is called, the kernel is in the process of a panic, and will soon be 1649 * is called, the kernel is in the process of a panic, and will soon be
1638 * dead. Therefore we don't take any locks, and attempt to get access 1650 * dead. Therefore we don't take any locks, and attempt to get access
1639 * to the chip as soon as possible. 1651 * to the chip as soon as possible.
1640 */ 1652 */
1641 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, 1653 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1642 unsigned long adr) 1654 unsigned long adr)
1643 { 1655 {
1644 struct cfi_private *cfi = map->fldrv_priv; 1656 struct cfi_private *cfi = map->fldrv_priv;
1645 int retries = 10; 1657 int retries = 10;
1646 int i; 1658 int i;
1647 1659
1648 /* 1660 /*
1649 * If the driver thinks the chip is idle, and no toggle bits 1661 * If the driver thinks the chip is idle, and no toggle bits
1650 * are changing, then the chip is actually idle for sure. 1662 * are changing, then the chip is actually idle for sure.
1651 */ 1663 */
1652 if (chip->state == FL_READY && chip_ready(map, adr)) 1664 if (chip->state == FL_READY && chip_ready(map, adr))
1653 return 0; 1665 return 0;
1654 1666
1655 /* 1667 /*
1656 * Try several times to reset the chip and then wait for it 1668 * Try several times to reset the chip and then wait for it
1657 * to become idle. The upper limit of a few milliseconds of 1669 * to become idle. The upper limit of a few milliseconds of
1658 * delay isn't a big problem: the kernel is dying anyway. It 1670 * delay isn't a big problem: the kernel is dying anyway. It
1659 * is more important to save the messages. 1671 * is more important to save the messages.
1660 */ 1672 */
1661 while (retries > 0) { 1673 while (retries > 0) {
1662 const unsigned long timeo = (HZ / 1000) + 1; 1674 const unsigned long timeo = (HZ / 1000) + 1;
1663 1675
1664 /* send the reset command */ 1676 /* send the reset command */
1665 map_write(map, CMD(0xF0), chip->start); 1677 map_write(map, CMD(0xF0), chip->start);
1666 1678
1667 /* wait for the chip to become ready */ 1679 /* wait for the chip to become ready */
1668 for (i = 0; i < jiffies_to_usecs(timeo); i++) { 1680 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1669 if (chip_ready(map, adr)) 1681 if (chip_ready(map, adr))
1670 return 0; 1682 return 0;
1671 1683
1672 udelay(1); 1684 udelay(1);
1673 } 1685 }
1674 } 1686 }
1675 1687
1676 /* the chip never became ready */ 1688 /* the chip never became ready */
1677 return -EBUSY; 1689 return -EBUSY;
1678 } 1690 }
1679 1691
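To put a number on the delay bound mentioned above (illustrative, assuming
HZ == 1000): each retry polls for jiffies_to_usecs((HZ / 1000) + 1) == 2000
iterations of udelay(1), so the ten retries cost at most roughly 20 ms before
-EBUSY is returned.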
1680 /* 1692 /*
1681 * Write out one word of data to a single flash chip during a kernel panic 1693 * Write out one word of data to a single flash chip during a kernel panic
1682 * 1694 *
1683 * This is only called during the panic_write() path. When panic_write() 1695 * This is only called during the panic_write() path. When panic_write()
1684 * is called, the kernel is in the process of a panic, and will soon be 1696 * is called, the kernel is in the process of a panic, and will soon be
1685 * dead. Therefore we don't take any locks, and attempt to get access 1697 * dead. Therefore we don't take any locks, and attempt to get access
1686 * to the chip as soon as possible. 1698 * to the chip as soon as possible.
1687 * 1699 *
 * The implementation of this routine is intentionally similar to
 * do_write_oneword(), in order to ease code maintenance.
 */
static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
				  unsigned long adr, map_word datum)
{
	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
	struct cfi_private *cfi = map->fldrv_priv;
	int retry_cnt = 0;
	map_word oldd;
	int ret = 0;
	int i;

	adr += chip->start;

	ret = cfi_amdstd_panic_wait(map, chip, adr);
	if (ret)
		return ret;

	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	ENABLE_VPP(map);

 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);

	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
		if (chip_ready(map, adr))
			break;

		udelay(1);
	}

	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}

 op_done:
	DISABLE_VPP(map);
	return ret;
}
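
For reference, the program preamble issued above is the standard AMD/JEDEC three-cycle unlock-plus-setup sequence. Below is a minimal sketch of the same cycles written out literally, assuming an 8-bit chip with the conventional 0x555/0x2AA unlock addresses; the driver itself derives the addresses from cfi->addr_unlock1/2 and lets cfi_send_gen_cmd() handle bus width and interleave, which this sketch ignores.

	/*
	 * Illustrative sketch only: the AMD one-word program sequence as
	 * raw bus cycles. The unlock addresses 0x555/0x2AA are the usual
	 * 8-bit-chip values; real hardware may differ, and the driver
	 * scales them per device_type and interleave.
	 */
	struct amd_cmd_cycle {
		unsigned long addr;	/* offset within the chip */
		unsigned char data;	/* command byte */
	};

	static const struct amd_cmd_cycle amd_word_program_seq[] = {
		{ 0x555, 0xAA },	/* first unlock cycle */
		{ 0x2AA, 0x55 },	/* second unlock cycle */
		{ 0x555, 0xA0 },	/* program setup */
		/* fourth cycle: the datum itself, written to the target address */
	};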

/*
 * Write out some data during a kernel panic
 *
 * This is used by the mtdoops driver to save the dying messages from a
 * kernel which has panic'd.
 *
 * This routine ignores all of the locking used throughout the rest of the
 * driver, in order to ensure that the data gets written out no matter what
 * state this driver (and the flash chip itself) was in when the kernel crashed.
 *
 * The implementation of this routine is intentionally similar to
 * cfi_amdstd_write_words(), in order to ease code maintenance.
 */
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, chipstart;
	int ret = 0;
	int chipnum;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus aligned, do the first byte write */
	if (ofs & (map_bankwidth(map) - 1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
		if (ret)
			return ret;

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs + chipstart);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map) - i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;

			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map) - 1)) {
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
		if (ret)
			return ret;

		tmp_buf = map_read(map, ofs + chipstart);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
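
Callers such as mtdoops reach this handler through the generic MTD API rather than directly. A hedged sketch of that call path follows; example_panic_dump(), record, and record_size are hypothetical names, and it assumes the mtd_panic_write() wrapper of this kernel generation.

	/*
	 * Sketch of an mtdoops-style caller running in panic context,
	 * where normal locking cannot be trusted. Hypothetical helper;
	 * mtd_panic_write() dispatches to the driver's panic_write op.
	 */
	static void example_panic_dump(struct mtd_info *mtd, loff_t ofs,
				       const u_char *record, size_t record_size)
	{
		size_t retlen;
		int ret;

		ret = mtd_panic_write(mtd, ofs, record_size, &retlen, record);
		if (ret || retlen != record_size)
			pr_err("panic write failed at 0x%llx\n",
			       (unsigned long long)ofs);
	}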


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
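
Only the sixth cycle distinguishes this chip-erase sequence (0x10 to the unlock address) from the per-sector erase used by do_erase_oneblock() below (cfi->sector_erase_cmd, typically 0x30, written to the sector address). Summarized with the same illustrative struct as the program-sequence sketch earlier, under the same 8-bit-chip assumptions:

	/* Illustrative only: AMD erase command cycles for an 8-bit chip. */
	static const struct amd_cmd_cycle amd_chip_erase_seq[] = {
		{ 0x555, 0xAA },	/* unlock 1 */
		{ 0x2AA, 0x55 },	/* unlock 2 */
		{ 0x555, 0x80 },	/* erase setup */
		{ 0x555, 0xAA },	/* unlock 1 (again) */
		{ 0x2AA, 0x55 },	/* unlock 2 (again) */
		{ 0x555, 0x10 },	/* chip erase; sector erase writes
					   0x30 to the sector address instead */
	};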


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, cfi->sector_erase_cmd, adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
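
do_erase_oneblock() is not called directly; cfi_varsize_frob() walks the requested range and invokes it once per erase block. A deliberately simplified sketch of that iteration follows - not the real helper, which also handles multiple erase regions and chip boundaries, and example_frob_range() is a hypothetical name.

	/*
	 * Simplified sketch, NOT the real cfi_varsize_frob(): assumes a
	 * single chip, a uniform mtd->erasesize, a chip-relative ofs,
	 * and a block-aligned range.
	 */
	static int example_frob_range(struct mtd_info *mtd, struct map_info *map,
				      struct flchip *chip, unsigned long ofs,
				      unsigned long len)
	{
		int ret;

		while (len) {
			ret = do_erase_oneblock(map, chip, ofs,
						mtd->erasesize, NULL);
			if (ret)
				return ret;
			ofs += mtd->erasesize;
			len -= mtd->erasesize;
		}
		return 0;
	}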


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
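
From the client side, both erase entry points are reached via mtd_erase(). A minimal synchronous caller might look like the sketch below; example_erase_block() is a hypothetical name, and since this driver completes the erase before returning, no completion callback is set.

	/* Sketch: erase one block at ofs; assumes ofs is block-aligned. */
	static int example_erase_block(struct mtd_info *mtd, loff_t ofs)
	{
		struct erase_info instr = {
			.mtd  = mtd,
			.addr = ofs,
			.len  = mtd->erasesize,
		};

		/* Runs to completion before returning in this driver. */
		return mtd_erase(mtd, &instr);
	}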

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
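
These become the Atmel lock/unlock handlers exposed through mtd_lock()/mtd_unlock(). A typical client unlocks a region before programming it, roughly as in this sketch; example_write_locked_region() is a hypothetical name, and error paths are abbreviated.

	/* Sketch: unlock, write, then best-effort re-lock one erase block. */
	static int example_write_locked_region(struct mtd_info *mtd, loff_t ofs,
					       size_t len, const u_char *buf)
	{
		size_t retlen;
		int ret;

		ret = mtd_unlock(mtd, ofs, mtd->erasesize);
		if (ret)
			return ret;

		ret = mtd_write(mtd, ofs, len, &retlen, buf);

		mtd_lock(mtd, ofs, mtd->erasesize);	/* best effort */
		return ret;
	}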


static void cfi_amdstd_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}


/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}


static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}
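
The notifier itself is hooked into the reboot chain during driver setup, outside this hunk; the registration presumably follows the usual pattern, sketched here with a hypothetical helper name.

	/* Sketch of the registration done elsewhere in the setup path. */
	static void example_register_reboot(struct mtd_info *mtd)
	{
		mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
		register_reboot_notifier(&mtd->reboot_notifier);
	}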


static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");