Commit 43c6871cae298e28700ca1ea76dc94b5f69446bc
Committed by Artem Bityutskiy
1 parent: b07948251f
Exists in master and in 6 other branches
mtd: nuc900_nand: add missing nand_release in nuc900_nand_remove
Signed-off-by: Axel Lin <axel.lin@gmail.com>
Acked-by: Wan ZongShun <mcuos.com@gmail.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Showing 1 changed file with 1 addition and 0 deletions (inline diff; the added line is marked with a leading +).
drivers/mtd/nand/nuc900_nand.c
/*
 * Copyright © 2009 Nuvoton technology corporation.
 *
 * Wan ZongShun <mcuos.com@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation;version 2 of the License.
 *
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>

#define REG_FMICSR 0x00
#define REG_SMCSR 0xa0
#define REG_SMISR 0xac
#define REG_SMCMD 0xb0
#define REG_SMADDR 0xb4
#define REG_SMDATA 0xb8

#define RESET_FMI 0x01
#define NAND_EN 0x08
#define READYBUSY (0x01 << 18)

#define SWRST 0x01
#define PSIZE (0x01 << 3)
#define DMARWEN (0x03 << 1)
#define BUSWID (0x01 << 4)
#define ECC4EN (0x01 << 5)
#define WP (0x01 << 24)
#define NANDCS (0x01 << 25)
#define ENDADDR (0x01 << 31)

#define read_data_reg(dev) \
        __raw_readl((dev)->reg + REG_SMDATA)

#define write_data_reg(dev, val) \
        __raw_writel((val), (dev)->reg + REG_SMDATA)

#define write_cmd_reg(dev, val) \
        __raw_writel((val), (dev)->reg + REG_SMCMD)

#define write_addr_reg(dev, val) \
        __raw_writel((val), (dev)->reg + REG_SMADDR)

struct nuc900_nand {
        struct mtd_info mtd;
        struct nand_chip chip;
        void __iomem *reg;
        struct clk *clk;
        spinlock_t lock;
};

static const struct mtd_partition partitions[] = {
        {
                .name = "NAND FS 0",
                .offset = 0,
                .size = 8 * 1024 * 1024
        },
        {
                .name = "NAND FS 1",
                .offset = MTDPART_OFS_APPEND,
                .size = MTDPART_SIZ_FULL
        }
};

static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
{
        unsigned char ret;
        struct nuc900_nand *nand;

        nand = container_of(mtd, struct nuc900_nand, mtd);

        ret = (unsigned char)read_data_reg(nand);

        return ret;
}

static void nuc900_nand_read_buf(struct mtd_info *mtd,
                                 unsigned char *buf, int len)
{
        int i;
        struct nuc900_nand *nand;

        nand = container_of(mtd, struct nuc900_nand, mtd);

        for (i = 0; i < len; i++)
                buf[i] = (unsigned char)read_data_reg(nand);
}

static void nuc900_nand_write_buf(struct mtd_info *mtd,
                                  const unsigned char *buf, int len)
{
        int i;
        struct nuc900_nand *nand;

        nand = container_of(mtd, struct nuc900_nand, mtd);

        for (i = 0; i < len; i++)
                write_data_reg(nand, buf[i]);
}

static int nuc900_verify_buf(struct mtd_info *mtd,
                             const unsigned char *buf, int len)
{
        int i;
        struct nuc900_nand *nand;

        nand = container_of(mtd, struct nuc900_nand, mtd);

        for (i = 0; i < len; i++) {
                if (buf[i] != (unsigned char)read_data_reg(nand))
                        return -EFAULT;
        }

        return 0;
}

static int nuc900_check_rb(struct nuc900_nand *nand)
{
        unsigned int val;
        spin_lock(&nand->lock);
        val = __raw_readl(REG_SMISR);
        val &= READYBUSY;
        spin_unlock(&nand->lock);

        return val;
}

static int nuc900_nand_devready(struct mtd_info *mtd)
{
        struct nuc900_nand *nand;
        int ready;

        nand = container_of(mtd, struct nuc900_nand, mtd);

        ready = (nuc900_check_rb(nand)) ? 1 : 0;
        return ready;
}

static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
                                   int column, int page_addr)
{
        register struct nand_chip *chip = mtd->priv;
        struct nuc900_nand *nand;

        nand = container_of(mtd, struct nuc900_nand, mtd);

        if (command == NAND_CMD_READOOB) {
                column += mtd->writesize;
                command = NAND_CMD_READ0;
        }

        write_cmd_reg(nand, command & 0xff);

        if (column != -1 || page_addr != -1) {

                if (column != -1) {
                        if (chip->options & NAND_BUSWIDTH_16)
                                column >>= 1;
                        write_addr_reg(nand, column);
                        write_addr_reg(nand, column >> 8 | ENDADDR);
                }
                if (page_addr != -1) {
                        write_addr_reg(nand, page_addr);

                        if (chip->chipsize > (128 << 20)) {
                                write_addr_reg(nand, page_addr >> 8);
                                write_addr_reg(nand, page_addr >> 16 | ENDADDR);
                        } else {
                                write_addr_reg(nand, page_addr >> 8 | ENDADDR);
                        }
                }
        }

        switch (command) {
        case NAND_CMD_CACHEDPROG:
        case NAND_CMD_PAGEPROG:
        case NAND_CMD_ERASE1:
        case NAND_CMD_ERASE2:
        case NAND_CMD_SEQIN:
        case NAND_CMD_RNDIN:
        case NAND_CMD_STATUS:
        case NAND_CMD_DEPLETE1:
                return;

        case NAND_CMD_STATUS_ERROR:
        case NAND_CMD_STATUS_ERROR0:
        case NAND_CMD_STATUS_ERROR1:
        case NAND_CMD_STATUS_ERROR2:
        case NAND_CMD_STATUS_ERROR3:
                udelay(chip->chip_delay);
                return;

        case NAND_CMD_RESET:
                if (chip->dev_ready)
                        break;
                udelay(chip->chip_delay);

                write_cmd_reg(nand, NAND_CMD_STATUS);
                write_cmd_reg(nand, command);

                while (!nuc900_check_rb(nand))
                        ;

                return;

        case NAND_CMD_RNDOUT:
                write_cmd_reg(nand, NAND_CMD_RNDOUTSTART);
                return;

        case NAND_CMD_READ0:

                write_cmd_reg(nand, NAND_CMD_READSTART);
        default:

                if (!chip->dev_ready) {
                        udelay(chip->chip_delay);
                        return;
                }
        }

        /* Apply this short delay always to ensure that we do wait tWB in
         * any case on any machine. */
        ndelay(100);

        while (!chip->dev_ready(mtd))
                ;
}

static void nuc900_nand_enable(struct nuc900_nand *nand)
{
        unsigned int val;
        spin_lock(&nand->lock);
        __raw_writel(RESET_FMI, (nand->reg + REG_FMICSR));

        val = __raw_readl(nand->reg + REG_FMICSR);

        if (!(val & NAND_EN))
                __raw_writel(val | NAND_EN, REG_FMICSR);

        val = __raw_readl(nand->reg + REG_SMCSR);

        val &= ~(SWRST|PSIZE|DMARWEN|BUSWID|ECC4EN|NANDCS);
        val |= WP;

        __raw_writel(val, nand->reg + REG_SMCSR);

        spin_unlock(&nand->lock);
}

static int __devinit nuc900_nand_probe(struct platform_device *pdev)
{
        struct nuc900_nand *nuc900_nand;
        struct nand_chip *chip;
        int retval;
        struct resource *res;

        retval = 0;

        nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
        if (!nuc900_nand)
                return -ENOMEM;
        chip = &(nuc900_nand->chip);

        nuc900_nand->mtd.priv = chip;
        nuc900_nand->mtd.owner = THIS_MODULE;
        spin_lock_init(&nuc900_nand->lock);

        nuc900_nand->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(nuc900_nand->clk)) {
                retval = -ENOENT;
                goto fail1;
        }
        clk_enable(nuc900_nand->clk);

        chip->cmdfunc = nuc900_nand_command_lp;
        chip->dev_ready = nuc900_nand_devready;
        chip->read_byte = nuc900_nand_read_byte;
        chip->write_buf = nuc900_nand_write_buf;
        chip->read_buf = nuc900_nand_read_buf;
        chip->verify_buf = nuc900_verify_buf;
        chip->chip_delay = 50;
        chip->options = 0;
        chip->ecc.mode = NAND_ECC_SOFT;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                retval = -ENXIO;
                goto fail1;
        }

        if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
                retval = -EBUSY;
                goto fail1;
        }

        nuc900_nand->reg = ioremap(res->start, resource_size(res));
        if (!nuc900_nand->reg) {
                retval = -ENOMEM;
                goto fail2;
        }

        nuc900_nand_enable(nuc900_nand);

        if (nand_scan(&(nuc900_nand->mtd), 1)) {
                retval = -ENXIO;
                goto fail3;
        }

        mtd_device_register(&(nuc900_nand->mtd), partitions,
                            ARRAY_SIZE(partitions));

        platform_set_drvdata(pdev, nuc900_nand);

        return retval;

fail3:  iounmap(nuc900_nand->reg);
fail2:  release_mem_region(res->start, resource_size(res));
fail1:  kfree(nuc900_nand);
        return retval;
}

static int __devexit nuc900_nand_remove(struct platform_device *pdev)
{
        struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
        struct resource *res;

+       nand_release(&nuc900_nand->mtd);
        iounmap(nuc900_nand->reg);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));

        clk_disable(nuc900_nand->clk);
        clk_put(nuc900_nand->clk);

        kfree(nuc900_nand);

        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver nuc900_nand_driver = {
        .probe = nuc900_nand_probe,
        .remove = __devexit_p(nuc900_nand_remove),
        .driver = {
                .name = "nuc900-fmi",
                .owner = THIS_MODULE,
        },
};

static int __init nuc900_nand_init(void)
{
        return platform_driver_register(&nuc900_nand_driver);
}

static void __exit nuc900_nand_exit(void)
{
        platform_driver_unregister(&nuc900_nand_driver);
}

module_init(nuc900_nand_init);
module_exit(nuc900_nand_exit);

MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-fmi");
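Editorial note (not part of the commit): nand_scan() in nuc900_nand_probe() initializes the NAND core and allocates its internal state, and mtd_device_register() publishes the MTD device to the rest of the kernel. In the MTD API of this kernel generation, nand_release() is the call that undoes both steps, so omitting it in the remove path leaves the MTD device registered while the driver's memory and I/O mapping are being torn down. The sketch below restates the intended teardown order after the patch; the comments are assumptions added for illustration and do not appear in the source.

        /* Sketch of the fixed remove path, mirroring probe in reverse order
         * (assumed rationale; reuses the driver's own struct nuc900_nand). */
        static int __devexit nuc900_nand_remove(struct platform_device *pdev)
        {
                struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
                struct resource *res;

                /* Undo nand_scan() + mtd_device_register(): unregister the MTD
                 * device and free the NAND core's internal allocations. */
                nand_release(&nuc900_nand->mtd);

                /* Only now is it safe to drop the I/O mapping that the chip
                 * callbacks (read_byte, read_buf, ...) rely on. */
                iounmap(nuc900_nand->reg);

                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                release_mem_region(res->start, resource_size(res));

                clk_disable(nuc900_nand->clk);
                clk_put(nuc900_nand->clk);

                kfree(nuc900_nand);
                platform_set_drvdata(pdev, NULL);

                return 0;
        }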