Commit 3eb3e72a3f66db140dabd66b553380c19b2035d4
nand/denali: Adding Denali NAND driver support
This adds Denali NAND driver support to U-Boot. The driver is ported from Linux at commit ID fdbad98dff8007f2b8bee6698b5d25ebba0471c9. For the 64-bit variant of the Denali controller, you need to define the macro CONFIG_SYS_NAND_DENALI_64BIT. Signed-off-by: Chin Liang See <clsee@altera.com> Cc: Scott Wood <scottwood@freescale.com> Cc: Masahiro Yamada <yamada.m@jp.panasonic.com> Signed-off-by: Masahiro Yamada <yamada.m@jp.panasonic.com> Reviewed-by: Masahiro Yamada <yamada.m@jp.panasonic.com> Tested-by: Masahiro Yamada <yamada.m@jp.panasonic.com>
Showing 3 changed files with 1673 additions and 0 deletions Side-by-side Diff
... | ... | @@ -42,6 +42,7 @@ |
42 | 42 | obj-$(CONFIG_NAND_ATMEL) += atmel_nand.o |
43 | 43 | obj-$(CONFIG_DRIVER_NAND_BFIN) += bfin_nand.o |
44 | 44 | obj-$(CONFIG_NAND_DAVINCI) += davinci_nand.o |
45 | +obj-$(CONFIG_NAND_DENALI) += denali.o | |
45 | 46 | obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_nand.o |
46 | 47 | obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_nand.o |
47 | 48 | obj-$(CONFIG_NAND_FSL_UPM) += fsl_upm.o |
1 | +/* | |
2 | + * Copyright (C) 2014 Panasonic Corporation | |
3 | + * Copyright (C) 2013-2014, Altera Corporation <www.altera.com> | |
4 | + * Copyright (C) 2009-2010, Intel Corporation and its suppliers. | |
5 | + * | |
6 | + * SPDX-License-Identifier: GPL-2.0+ | |
7 | + */ | |
8 | + | |
9 | +#include <common.h> | |
10 | +#include <malloc.h> | |
11 | +#include <nand.h> | |
12 | +#include <asm/errno.h> | |
13 | +#include <asm/io.h> | |
14 | + | |
15 | +#include "denali.h" | |
16 | + | |
17 | +#define NAND_DEFAULT_TIMINGS -1 | |
18 | + | |
19 | +static int onfi_timing_mode = NAND_DEFAULT_TIMINGS; | |
20 | + | |
21 | +/* We define a macro here that combines all interrupts this driver uses into | |
22 | + * a single constant value, for convenience. */ | |
23 | +#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \ | |
24 | + INTR_STATUS__ECC_TRANSACTION_DONE | \ | |
25 | + INTR_STATUS__ECC_ERR | \ | |
26 | + INTR_STATUS__PROGRAM_FAIL | \ | |
27 | + INTR_STATUS__LOAD_COMP | \ | |
28 | + INTR_STATUS__PROGRAM_COMP | \ | |
29 | + INTR_STATUS__TIME_OUT | \ | |
30 | + INTR_STATUS__ERASE_FAIL | \ | |
31 | + INTR_STATUS__RST_COMP | \ | |
32 | + INTR_STATUS__ERASE_COMP | \ | |
33 | + INTR_STATUS__ECC_UNCOR_ERR | \ | |
34 | + INTR_STATUS__INT_ACT | \ | |
35 | + INTR_STATUS__LOCKED_BLK) | |
36 | + | |
37 | +/* indicates whether or not the internal value for the flash bank is | |
38 | + * valid or not */ | |
39 | +#define CHIP_SELECT_INVALID -1 | |
40 | + | |
41 | +#define SUPPORT_8BITECC 1 | |
42 | + | |
43 | +/* | |
44 | + * this macro allows us to convert from an MTD structure to our own | |
45 | + * device context (denali) structure. | |
46 | + */ | |
47 | +#define mtd_to_denali(m) (((struct nand_chip *)mtd->priv)->priv) | |
48 | + | |
49 | +/* These constants are defined by the driver to enable common driver | |
50 | + * configuration options. */ | |
51 | +#define SPARE_ACCESS 0x41 | |
52 | +#define MAIN_ACCESS 0x42 | |
53 | +#define MAIN_SPARE_ACCESS 0x43 | |
54 | + | |
55 | +#define DENALI_UNLOCK_START 0x10 | |
56 | +#define DENALI_UNLOCK_END 0x11 | |
57 | +#define DENALI_LOCK 0x21 | |
58 | +#define DENALI_LOCK_TIGHT 0x31 | |
59 | +#define DENALI_BUFFER_LOAD 0x60 | |
60 | +#define DENALI_BUFFER_WRITE 0x62 | |
61 | + | |
62 | +#define DENALI_READ 0 | |
63 | +#define DENALI_WRITE 0x100 | |
64 | + | |
65 | +/* types of device accesses. We can issue commands and get status */ | |
66 | +#define COMMAND_CYCLE 0 | |
67 | +#define ADDR_CYCLE 1 | |
68 | +#define STATUS_CYCLE 2 | |
69 | + | |
70 | +/* this is a helper macro that allows us to | |
71 | + * format the bank into the proper bits for the controller */ | |
72 | +#define BANK(x) ((x) << 24) | |
73 | + | |
74 | +/* Interrupts are cleared by writing a 1 to the appropriate status bit */ | |
75 | +static inline void clear_interrupt(struct denali_nand_info *denali, | |
76 | + uint32_t irq_mask) | |
77 | +{ | |
78 | + uint32_t intr_status_reg; | |
79 | + | |
80 | + intr_status_reg = INTR_STATUS(denali->flash_bank); | |
81 | + | |
82 | + writel(irq_mask, denali->flash_reg + intr_status_reg); | |
83 | +} | |
84 | + | |
85 | +static uint32_t read_interrupt_status(struct denali_nand_info *denali) | |
86 | +{ | |
87 | + uint32_t intr_status_reg; | |
88 | + | |
89 | + intr_status_reg = INTR_STATUS(denali->flash_bank); | |
90 | + | |
91 | + return readl(denali->flash_reg + intr_status_reg); | |
92 | +} | |
93 | + | |
94 | +static void clear_interrupts(struct denali_nand_info *denali) | |
95 | +{ | |
96 | + uint32_t status; | |
97 | + | |
98 | + status = read_interrupt_status(denali); | |
99 | + clear_interrupt(denali, status); | |
100 | + | |
101 | + denali->irq_status = 0; | |
102 | +} | |
103 | + | |
104 | +static void denali_irq_enable(struct denali_nand_info *denali, | |
105 | + uint32_t int_mask) | |
106 | +{ | |
107 | + int i; | |
108 | + | |
109 | + for (i = 0; i < denali->max_banks; ++i) | |
110 | + writel(int_mask, denali->flash_reg + INTR_EN(i)); | |
111 | +} | |
112 | + | |
/*
 * Busy-wait for any interrupt in irq_mask to assert.
 *
 * Polls the hardware status register directly (1,000,000 x 1us polls,
 * roughly one second).  Returns the masked interrupt status when one of
 * the requested bits fires, or 0 on timeout.  Only bits inside
 * DENALI_IRQ_ALL are ever reported.
 */
static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
{
	unsigned long timeout = 1000000;
	uint32_t intr_status;

	do {
		intr_status = read_interrupt_status(denali) & DENALI_IRQ_ALL;
		if (intr_status & irq_mask) {
			denali->irq_status &= ~irq_mask;
			/* our interrupt was detected */
			break;
		}
		udelay(1);
		timeout--;
	} while (timeout != 0);

	if (timeout == 0) {
		/* timeout: report the raw status to aid debugging */
		printf("Denali timeout with interrupt status %08x\n",
		       read_interrupt_status(denali));
		intr_status = 0;
	}
	return intr_status;
}
137 | + | |
/*
 * Certain operations for the denali NAND controller use an indexed mode to
 * read/write data. The operation is performed by writing the address value
 * of the command to the device memory followed by the data. This function
 * abstracts this common operation.
 */
static void index_addr(struct denali_nand_info *denali,
		       uint32_t address, uint32_t data)
{
	/* control word first, then the data word - the order matters */
	writel(address, denali->flash_mem + INDEX_CTRL_REG);
	writel(data, denali->flash_mem + INDEX_DATA_REG);
}
150 | + | |
/* Perform an indexed read of the device */
static void index_addr_read_data(struct denali_nand_info *denali,
				 uint32_t address, uint32_t *pdata)
{
	/* write the command/address, then latch the response word */
	writel(address, denali->flash_mem + INDEX_CTRL_REG);
	*pdata = readl(denali->flash_mem + INDEX_DATA_REG);
}
158 | + | |
159 | +/* We need to buffer some data for some of the NAND core routines. | |
160 | + * The operations manage buffering that data. */ | |
161 | +static void reset_buf(struct denali_nand_info *denali) | |
162 | +{ | |
163 | + denali->buf.head = 0; | |
164 | + denali->buf.tail = 0; | |
165 | +} | |
166 | + | |
167 | +static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte) | |
168 | +{ | |
169 | + denali->buf.buf[denali->buf.tail++] = byte; | |
170 | +} | |
171 | + | |
172 | +/* resets a specific device connected to the core */ | |
173 | +static void reset_bank(struct denali_nand_info *denali) | |
174 | +{ | |
175 | + uint32_t irq_status; | |
176 | + uint32_t irq_mask = INTR_STATUS__RST_COMP | | |
177 | + INTR_STATUS__TIME_OUT; | |
178 | + | |
179 | + clear_interrupts(denali); | |
180 | + | |
181 | + writel(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET); | |
182 | + | |
183 | + irq_status = wait_for_irq(denali, irq_mask); | |
184 | + if (irq_status & INTR_STATUS__TIME_OUT) | |
185 | + debug("reset bank failed.\n"); | |
186 | +} | |
187 | + | |
/*
 * Reset the flash controller: acknowledge stale reset/timeout status on
 * every bank, reset each bank in turn, then acknowledge again.
 * Always returns 0.
 */
static uint32_t denali_nand_reset(struct denali_nand_info *denali)
{
	uint32_t i;

	/* clear any stale RST_COMP/TIME_OUT status before resetting */
	for (i = 0; i < denali->max_banks; i++)
		writel(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
		       denali->flash_reg + INTR_STATUS(i));

	for (i = 0; i < denali->max_banks; i++) {
		writel(1 << i, denali->flash_reg + DEVICE_RESET);
		/* busy-poll until the bank reports completion or timeout */
		while (!(readl(denali->flash_reg + INTR_STATUS(i)) &
			(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
			/* NOTE(review): this debug() looks unreachable -
			 * if TIME_OUT were set the while condition above
			 * would already be false; verify against upstream */
			if (readl(denali->flash_reg + INTR_STATUS(i)) &
				INTR_STATUS__TIME_OUT)
				debug("NAND Reset operation timed out on bank"
					" %d\n", i);
	}

	/* acknowledge the completion/timeout bits we just polled */
	for (i = 0; i < denali->max_banks; i++)
		writel(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
		       denali->flash_reg + INTR_STATUS(i));

	return 0;
}
213 | + | |
/*
 * this routine calculates the ONFI timing values for a given mode and
 * programs the clocking register accordingly. The mode is determined by
 * the get_onfi_nand_para routine.
 *
 * The per-mode arrays below hold timing parameters indexed by ONFI
 * timing mode 0-5; names follow the ONFI datasheet (tREA, tRP, tREH,
 * tRC, tRHOH, tRLOH, tCEA, tADL, tRHW, tRHZ, tWHR, tCS).  Values are
 * presumably nanoseconds, with CLK_X the controller clock period in the
 * same unit - TODO confirm against the ONFI spec and denali.h.
 */
static void nand_onfi_timing_set(struct denali_nand_info *denali,
								uint16_t mode)
{
	uint32_t trea[6] = {40, 30, 25, 20, 20, 16};
	uint32_t trp[6] = {50, 25, 17, 15, 12, 10};
	uint32_t treh[6] = {30, 15, 15, 10, 10, 7};
	uint32_t trc[6] = {100, 50, 35, 30, 25, 20};
	uint32_t trhoh[6] = {0, 15, 15, 15, 15, 15};
	uint32_t trloh[6] = {0, 0, 0, 0, 5, 5};
	uint32_t tcea[6] = {100, 45, 30, 25, 25, 25};
	uint32_t tadl[6] = {200, 100, 100, 100, 70, 70};
	uint32_t trhw[6] = {200, 100, 100, 100, 100, 100};
	uint32_t trhz[6] = {200, 100, 100, 100, 100, 100};
	uint32_t twhr[6] = {120, 80, 80, 60, 60, 60};
	uint32_t tcs[6] = {70, 35, 25, 25, 20, 15};

	uint32_t tclsrising = 1;
	uint32_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
	uint32_t dv_window = 0;
	uint32_t en_lo, en_hi;
	uint32_t acc_clks;
	uint32_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;

	/* read-enable low/high widths in controller clocks */
	en_lo = DIV_ROUND_UP(trp[mode], CLK_X);
	en_hi = DIV_ROUND_UP(treh[mode], CLK_X);
	if ((en_hi * CLK_X) < (treh[mode] + 2))
		en_hi++;

	/* stretch en_lo so the full cycle covers at least tRC */
	if ((en_lo + en_hi) * CLK_X < trc[mode])
		en_lo += DIV_ROUND_UP((trc[mode] - (en_lo + en_hi) * CLK_X),
				      CLK_X);

	if ((en_lo + en_hi) < CLK_MULTI)
		en_lo += CLK_MULTI - en_lo - en_hi;

	/* widen en_lo until the data-valid window is at least 8 */
	while (dv_window < 8) {
		data_invalid_rhoh = en_lo * CLK_X + trhoh[mode];

		data_invalid_rloh = (en_lo + en_hi) * CLK_X + trloh[mode];

		data_invalid =
		    data_invalid_rhoh <
		    data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;

		dv_window = data_invalid - trea[mode];

		if (dv_window < 8)
			en_lo++;
	}

	/* access clocks: at least tREA plus 3 units of margin */
	acc_clks = DIV_ROUND_UP(trea[mode], CLK_X);

	while (((acc_clks * CLK_X) - trea[mode]) < 3)
		acc_clks++;

	if ((data_invalid - acc_clks * CLK_X) < 2)
		debug("%s, Line %d: Warning!\n", __FILE__, __LINE__);

	addr_2_data = DIV_ROUND_UP(tadl[mode], CLK_X);
	re_2_we = DIV_ROUND_UP(trhw[mode], CLK_X);
	re_2_re = DIV_ROUND_UP(trhz[mode], CLK_X);
	we_2_re = DIV_ROUND_UP(twhr[mode], CLK_X);
	cs_cnt = DIV_ROUND_UP((tcs[mode] - trp[mode]), CLK_X);
	if (!tclsrising)
		cs_cnt = DIV_ROUND_UP(tcs[mode], CLK_X);
	if (cs_cnt == 0)
		cs_cnt = 1;

	if (tcea[mode]) {
		while (((cs_cnt * CLK_X) + trea[mode]) < tcea[mode])
			cs_cnt++;
	}

	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
	if ((readl(denali->flash_reg + MANUFACTURER_ID) == 0) &&
	    (readl(denali->flash_reg + DEVICE_ID) == 0x88))
		acc_clks = 6;

	/* program the computed timing values into the controller */
	writel(acc_clks, denali->flash_reg + ACC_CLKS);
	writel(re_2_we, denali->flash_reg + RE_2_WE);
	writel(re_2_re, denali->flash_reg + RE_2_RE);
	writel(we_2_re, denali->flash_reg + WE_2_RE);
	writel(addr_2_data, denali->flash_reg + ADDR_2_DATA);
	writel(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
	writel(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
	writel(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
}
306 | + | |
/* queries the NAND device to see what ONFI modes it supports.
 *
 * Returns 0 on success, or -EIO when the controller reports no valid
 * ONFI timing mode.  NOTE(review): the return type is uint32_t but
 * -EIO is returned on error; callers only test non-zero, so this
 * works, but a signed type would be cleaner.
 */
static uint32_t get_onfi_nand_para(struct denali_nand_info *denali)
{
	int i;
	/*
	 * we needn't to do a reset here because driver has already
	 * reset all the banks before
	 */
	if (!(readl(denali->flash_reg + ONFI_TIMING_MODE) &
		ONFI_TIMING_MODE__VALUE))
		return -EIO;

	/* pick the highest advertised timing mode (5 down to 1);
	 * if none of those bits is set, i ends at 0 (mode 0) */
	for (i = 5; i > 0; i--) {
		if (readl(denali->flash_reg + ONFI_TIMING_MODE) &
			(0x01 << i))
			break;
	}

	nand_onfi_timing_set(denali, i);

	/* By now, all the ONFI devices we know support the page cache */
	/* rw feature. So here we enable the pipeline_rw_ahead feature */
	return 0;
}
331 | + | |
332 | +static void get_samsung_nand_para(struct denali_nand_info *denali, | |
333 | + uint8_t device_id) | |
334 | +{ | |
335 | + if (device_id == 0xd3) { /* Samsung K9WAG08U1A */ | |
336 | + /* Set timing register values according to datasheet */ | |
337 | + writel(5, denali->flash_reg + ACC_CLKS); | |
338 | + writel(20, denali->flash_reg + RE_2_WE); | |
339 | + writel(12, denali->flash_reg + WE_2_RE); | |
340 | + writel(14, denali->flash_reg + ADDR_2_DATA); | |
341 | + writel(3, denali->flash_reg + RDWR_EN_LO_CNT); | |
342 | + writel(2, denali->flash_reg + RDWR_EN_HI_CNT); | |
343 | + writel(2, denali->flash_reg + CS_SETUP_CNT); | |
344 | + } | |
345 | +} | |
346 | + | |
347 | +static void get_toshiba_nand_para(struct denali_nand_info *denali) | |
348 | +{ | |
349 | + uint32_t tmp; | |
350 | + | |
351 | + /* Workaround to fix a controller bug which reports a wrong */ | |
352 | + /* spare area size for some kind of Toshiba NAND device */ | |
353 | + if ((readl(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && | |
354 | + (readl(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) { | |
355 | + writel(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); | |
356 | + tmp = readl(denali->flash_reg + DEVICES_CONNECTED) * | |
357 | + readl(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); | |
358 | + writel(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); | |
359 | + } | |
360 | +} | |
361 | + | |
362 | +static void get_hynix_nand_para(struct denali_nand_info *denali, | |
363 | + uint8_t device_id) | |
364 | +{ | |
365 | + uint32_t main_size, spare_size; | |
366 | + | |
367 | + switch (device_id) { | |
368 | + case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ | |
369 | + case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ | |
370 | + writel(128, denali->flash_reg + PAGES_PER_BLOCK); | |
371 | + writel(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); | |
372 | + writel(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); | |
373 | + main_size = 4096 * | |
374 | + readl(denali->flash_reg + DEVICES_CONNECTED); | |
375 | + spare_size = 224 * | |
376 | + readl(denali->flash_reg + DEVICES_CONNECTED); | |
377 | + writel(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); | |
378 | + writel(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); | |
379 | + writel(0, denali->flash_reg + DEVICE_WIDTH); | |
380 | + break; | |
381 | + default: | |
382 | + debug("Spectra: Unknown Hynix NAND (Device ID: 0x%x)." | |
383 | + "Will use default parameter values instead.\n", | |
384 | + device_id); | |
385 | + } | |
386 | +} | |
387 | + | |
/*
 * determines how many NAND chips are connected to the controller. Note for
 * Intel CE4100 devices we don't support more than one device.
 *
 * Issues a READ ID (0x90) sequence on every chip select and counts the
 * consecutive banks whose low ID byte matches bank 0's.
 */
static void find_valid_banks(struct denali_nand_info *denali)
{
	uint32_t id[denali->max_banks];
	int i;

	denali->total_used_banks = 1;
	for (i = 0; i < denali->max_banks; i++) {
		/* command cycle 0x90, address cycle 0, then read one word */
		index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
		index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
		index_addr_read_data(denali,
				     (uint32_t)(MODE_11 | (i << 24) | 2),
				     &id[i]);

		if (i == 0) {
			/* no device on bank 0: nothing to compare against */
			if (!(id[i] & 0x0ff))
				break;
		} else {
			/* stop at the first bank that doesn't match bank 0 */
			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
				denali->total_used_banks++;
			else
				break;
		}
	}
}
416 | + | |
417 | +/* | |
418 | + * Use the configuration feature register to determine the maximum number of | |
419 | + * banks that the hardware supports. | |
420 | + */ | |
421 | +static void detect_max_banks(struct denali_nand_info *denali) | |
422 | +{ | |
423 | + uint32_t features = readl(denali->flash_reg + FEATURES); | |
424 | + denali->max_banks = 2 << (features & FEATURES__N_BANKS); | |
425 | +} | |
426 | + | |
427 | +static void detect_partition_feature(struct denali_nand_info *denali) | |
428 | +{ | |
429 | + /* | |
430 | + * For MRST platform, denali->fwblks represent the | |
431 | + * number of blocks firmware is taken, | |
432 | + * FW is in protect partition and MTD driver has no | |
433 | + * permission to access it. So let driver know how many | |
434 | + * blocks it can't touch. | |
435 | + */ | |
436 | + if (readl(denali->flash_reg + FEATURES) & FEATURES__PARTITION) { | |
437 | + if ((readl(denali->flash_reg + PERM_SRC_ID(1)) & | |
438 | + PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) { | |
439 | + denali->fwblks = | |
440 | + ((readl(denali->flash_reg + MIN_MAX_BANK(1)) & | |
441 | + MIN_MAX_BANK__MIN_VALUE) * | |
442 | + denali->blksperchip) | |
443 | + + | |
444 | + (readl(denali->flash_reg + MIN_BLK_ADDR(1)) & | |
445 | + MIN_BLK_ADDR__VALUE); | |
446 | + } else { | |
447 | + denali->fwblks = SPECTRA_START_BLOCK; | |
448 | + } | |
449 | + } else { | |
450 | + denali->fwblks = SPECTRA_START_BLOCK; | |
451 | + } | |
452 | +} | |
453 | + | |
/*
 * Identify the attached NAND via a raw READ ID sequence and program
 * vendor-appropriate timing/geometry registers.  Returns 0 on success
 * or -EIO if an ONFI device fails parameter discovery.
 */
static uint32_t denali_nand_timing_set(struct denali_nand_info *denali)
{
	uint32_t id_bytes[5], addr;
	uint8_t i, maf_id, device_id;

	/* Use read id method to get device ID and other
	 * params. For some NAND chips, controller can't
	 * report the correct device ID by reading from
	 * DEVICE_ID register
	 * */
	addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
	index_addr(denali, (uint32_t)addr | 0, 0x90);
	index_addr(denali, (uint32_t)addr | 1, 0);
	for (i = 0; i < 5; i++)
		index_addr_read_data(denali, addr | 2, &id_bytes[i]);
	maf_id = id_bytes[0];
	device_id = id_bytes[1];

	/* dispatch: ONFI support first, then known manufacturer IDs */
	if (readl(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (get_onfi_nand_para(denali))
			return -EIO;
	} else if (maf_id == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para(denali, device_id);
	} else if (maf_id == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para(denali);
	} else if (maf_id == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para(denali, device_id);
	}

	find_valid_banks(denali);

	detect_partition_feature(denali);

	/* If the user specified to override the default timings
	 * with a specific ONFI mode, we apply those changes here.
	 */
	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
		nand_onfi_timing_set(denali, onfi_timing_mode);

	return 0;
}
496 | + | |
/* validation function to verify that the controlling software is making
 * a valid request
 */
static inline bool is_flash_bank_valid(int flash_bank)
{
	/* the controller exposes chip selects 0..3 only */
	if (flash_bank < 0)
		return false;

	return flash_bank < 4;
}
504 | + | |
505 | +static void denali_irq_init(struct denali_nand_info *denali) | |
506 | +{ | |
507 | + uint32_t int_mask = 0; | |
508 | + int i; | |
509 | + | |
510 | + /* Disable global interrupts */ | |
511 | + writel(0, denali->flash_reg + GLOBAL_INT_ENABLE); | |
512 | + | |
513 | + int_mask = DENALI_IRQ_ALL; | |
514 | + | |
515 | + /* Clear all status bits */ | |
516 | + for (i = 0; i < denali->max_banks; ++i) | |
517 | + writel(0xFFFF, denali->flash_reg + INTR_STATUS(i)); | |
518 | + | |
519 | + denali_irq_enable(denali, int_mask); | |
520 | +} | |
521 | + | |
522 | +/* This helper function setups the registers for ECC and whether or not | |
523 | + * the spare area will be transferred. */ | |
524 | +static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, | |
525 | + bool transfer_spare) | |
526 | +{ | |
527 | + int ecc_en_flag = 0, transfer_spare_flag = 0; | |
528 | + | |
529 | + /* set ECC, transfer spare bits if needed */ | |
530 | + ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; | |
531 | + transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; | |
532 | + | |
533 | + /* Enable spare area/ECC per user's request. */ | |
534 | + writel(ecc_en_flag, denali->flash_reg + ECC_ENABLE); | |
535 | + /* applicable for MAP01 only */ | |
536 | + writel(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG); | |
537 | +} | |
538 | + | |
/* sends a pipeline command operation to the controller. See the Denali NAND
 * controller's user guide for more information (section 4.2.3.6).
 *
 * Returns 0 on success, -EIO if a read command is not accepted in time.
 */
static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
				    bool ecc_en, bool transfer_spare,
				    int access_type, int op)
{
	uint32_t addr, cmd, irq_status;
	/* NOTE(review): static, so shared across calls - but it is never
	 * modified, so it behaves as the constant 1 (one page per op) */
	static uint32_t page_count = 1;

	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

	/* clear interrupts */
	clear_interrupts(denali);

	addr = BANK(denali->flash_bank) | denali->page;

	/* setup the access type */
	cmd = MODE_10 | addr;
	index_addr(denali, cmd, access_type);

	/* setup the pipeline command */
	index_addr(denali, cmd, 0x2000 | op | page_count);

	/* switch to MAP01 addressing for the data phase */
	cmd = MODE_01 | addr;
	writel(cmd, denali->flash_mem + INDEX_CTRL_REG);

	if (op == DENALI_READ) {
		/* wait for command to be accepted */
		irq_status = wait_for_irq(denali, INTR_STATUS__LOAD_COMP);

		if (irq_status == 0)
			return -EIO;
	}

	return 0;
}
576 | + | |
577 | +/* helper function that simply writes a buffer to the flash */ | |
578 | +static int write_data_to_flash_mem(struct denali_nand_info *denali, | |
579 | + const uint8_t *buf, int len) | |
580 | +{ | |
581 | + uint32_t i = 0, *buf32; | |
582 | + | |
583 | + /* verify that the len is a multiple of 4. see comment in | |
584 | + * read_data_from_flash_mem() */ | |
585 | + BUG_ON((len % 4) != 0); | |
586 | + | |
587 | + /* write the data to the flash memory */ | |
588 | + buf32 = (uint32_t *)buf; | |
589 | + for (i = 0; i < len / 4; i++) | |
590 | + writel(*buf32++, denali->flash_mem + INDEX_DATA_REG); | |
591 | + return i * 4; /* intent is to return the number of bytes read */ | |
592 | +} | |
593 | + | |
594 | +/* helper function that simply reads a buffer from the flash */ | |
595 | +static int read_data_from_flash_mem(struct denali_nand_info *denali, | |
596 | + uint8_t *buf, int len) | |
597 | +{ | |
598 | + uint32_t i, *buf32; | |
599 | + | |
600 | + /* | |
601 | + * we assume that len will be a multiple of 4, if not | |
602 | + * it would be nice to know about it ASAP rather than | |
603 | + * have random failures... | |
604 | + * This assumption is based on the fact that this | |
605 | + * function is designed to be used to read flash pages, | |
606 | + * which are typically multiples of 4... | |
607 | + */ | |
608 | + | |
609 | + BUG_ON((len % 4) != 0); | |
610 | + | |
611 | + /* transfer the data from the flash */ | |
612 | + buf32 = (uint32_t *)buf; | |
613 | + for (i = 0; i < len / 4; i++) | |
614 | + *buf32++ = readl(denali->flash_mem + INDEX_DATA_REG); | |
615 | + | |
616 | + return i * 4; /* intent is to return the number of bytes read */ | |
617 | +} | |
618 | + | |
619 | +static void denali_mode_main_access(struct denali_nand_info *denali) | |
620 | +{ | |
621 | + uint32_t addr, cmd; | |
622 | + | |
623 | + addr = BANK(denali->flash_bank) | denali->page; | |
624 | + cmd = MODE_10 | addr; | |
625 | + index_addr(denali, cmd, MAIN_ACCESS); | |
626 | +} | |
627 | + | |
628 | +static void denali_mode_main_spare_access(struct denali_nand_info *denali) | |
629 | +{ | |
630 | + uint32_t addr, cmd; | |
631 | + | |
632 | + addr = BANK(denali->flash_bank) | denali->page; | |
633 | + cmd = MODE_10 | addr; | |
634 | + index_addr(denali, cmd, MAIN_SPARE_ACCESS); | |
635 | +} | |
636 | + | |
637 | +/* writes OOB data to the device */ | |
638 | +static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) | |
639 | +{ | |
640 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
641 | + uint32_t irq_status; | |
642 | + uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP | | |
643 | + INTR_STATUS__PROGRAM_FAIL; | |
644 | + int status = 0; | |
645 | + | |
646 | + denali->page = page; | |
647 | + | |
648 | + if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, | |
649 | + DENALI_WRITE) == 0) { | |
650 | + write_data_to_flash_mem(denali, buf, mtd->oobsize); | |
651 | + | |
652 | + /* wait for operation to complete */ | |
653 | + irq_status = wait_for_irq(denali, irq_mask); | |
654 | + | |
655 | + if (irq_status == 0) { | |
656 | + dev_err(denali->dev, "OOB write failed\n"); | |
657 | + status = -EIO; | |
658 | + } | |
659 | + } else { | |
660 | + printf("unable to send pipeline command\n"); | |
661 | + status = -EIO; | |
662 | + } | |
663 | + return status; | |
664 | +} | |
665 | + | |
666 | +/* reads OOB data from the device */ | |
667 | +static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) | |
668 | +{ | |
669 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
670 | + uint32_t irq_mask = INTR_STATUS__LOAD_COMP, | |
671 | + irq_status = 0, addr = 0x0, cmd = 0x0; | |
672 | + | |
673 | + denali->page = page; | |
674 | + | |
675 | + if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, | |
676 | + DENALI_READ) == 0) { | |
677 | + read_data_from_flash_mem(denali, buf, mtd->oobsize); | |
678 | + | |
679 | + /* wait for command to be accepted | |
680 | + * can always use status0 bit as the mask is identical for each | |
681 | + * bank. */ | |
682 | + irq_status = wait_for_irq(denali, irq_mask); | |
683 | + | |
684 | + if (irq_status == 0) | |
685 | + printf("page on OOB timeout %d\n", denali->page); | |
686 | + | |
687 | + /* We set the device back to MAIN_ACCESS here as I observed | |
688 | + * instability with the controller if you do a block erase | |
689 | + * and the last transaction was a SPARE_ACCESS. Block erase | |
690 | + * is reliable (according to the MTD test infrastructure) | |
691 | + * if you are in MAIN_ACCESS. | |
692 | + */ | |
693 | + addr = BANK(denali->flash_bank) | denali->page; | |
694 | + cmd = MODE_10 | addr; | |
695 | + index_addr(denali, cmd, MAIN_ACCESS); | |
696 | + } | |
697 | +} | |
698 | + | |
/* this function examines buffers to see if they contain data that
 * indicate that the buffer is part of an erased region of flash.
 */
static bool is_erased(uint8_t *buf, int len)
{
	int i;

	/* an erased flash region reads back as all-0xFF bytes */
	for (i = 0; i < len; i++) {
		if (buf[i] != 0xFF)
			return false;
	}

	return true;
}
710 | + | |
711 | +/* programs the controller to either enable/disable DMA transfers */ | |
712 | +static void denali_enable_dma(struct denali_nand_info *denali, bool en) | |
713 | +{ | |
714 | + uint32_t reg_val = 0x0; | |
715 | + | |
716 | + if (en) | |
717 | + reg_val = DMA_ENABLE__FLAG; | |
718 | + | |
719 | + writel(reg_val, denali->flash_reg + DMA_ENABLE); | |
720 | + readl(denali->flash_reg + DMA_ENABLE); | |
721 | +} | |
722 | + | |
/* setups the HW to perform the data DMA */
static void denali_setup_dma(struct denali_nand_info *denali, int op)
{
	uint32_t mode;
	const int page_count = 1;
	uint32_t addr = (uint32_t)denali->buf.dma_buf;

	/* make the DMA buffer contents visible to the device before the
	 * transfer starts */
	flush_dcache_range(addr, addr + sizeof(denali->buf.dma_buf));

/* For Denali controller that is 64 bit bus IP core */
#ifdef CONFIG_SYS_NAND_DENALI_64BIT
	mode = MODE_10 | BANK(denali->flash_bank) | denali->page;

	/* DMA is a three step process */

	/* 1. setup transfer type, interrupt when complete,
	   burst len = 64 bytes, the number of pages */
	index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count);

	/* 2. set memory low address bits 31:0 */
	index_addr(denali, mode, addr);

	/* 3. set memory high address bits 63:32 (always 0 here, since
	   the buffer address was truncated to 32 bits above) */
	index_addr(denali, mode, 0);
#else
	mode = MODE_10 | BANK(denali->flash_bank);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);

	/* 2. set memory high address bits 23:8 */
	index_addr(denali, mode | ((uint32_t)(addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	index_addr(denali, mode | ((uint32_t)addr << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes*/
	index_addr(denali, mode | 0x14000, 0x2400);
#endif
}
765 | + | |
/* Common DMA function
 *
 * Runs one complete DMA transfer: configure ECC/spare handling, clear
 * interrupts, enable DMA, program the DMA descriptor, wait for the bits
 * in irq_mask, then disable DMA again.  Returns the interrupt status
 * observed (0 on timeout).
 */
static uint32_t denali_dma_configuration(struct denali_nand_info *denali,
					 uint32_t ops, bool raw_xfer,
					 uint32_t irq_mask, int oob_required)
{
	uint32_t irq_status = 0;
	/* setup_ecc_for_xfer(bool ecc_en, bool transfer_spare):
	 * ECC is on unless this is a raw transfer */
	setup_ecc_for_xfer(denali, !raw_xfer, oob_required);

	/* clear any previous interrupt flags */
	clear_interrupts(denali);

	/* enable the DMA */
	denali_enable_dma(denali, true);

	/* setup the DMA */
	denali_setup_dma(denali, ops);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	/* if ECC fault happen, seems we need delay before turning off DMA.
	 * If not, the controller will go into non responsive condition */
	if (irq_status & INTR_STATUS__ECC_UNCOR_ERR)
		udelay(100);

	/* disable the DMA */
	denali_enable_dma(denali, false);

	return irq_status;
}
797 | + | |
798 | +static int write_page(struct mtd_info *mtd, struct nand_chip *chip, | |
799 | + const uint8_t *buf, bool raw_xfer, int oob_required) | |
800 | +{ | |
801 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
802 | + | |
803 | + uint32_t irq_status = 0; | |
804 | + uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP; | |
805 | + | |
806 | + denali->status = 0; | |
807 | + | |
808 | + /* copy buffer into DMA buffer */ | |
809 | + memcpy(denali->buf.dma_buf, buf, mtd->writesize); | |
810 | + | |
811 | + /* need extra memcpy for raw transfer */ | |
812 | + if (raw_xfer) | |
813 | + memcpy(denali->buf.dma_buf + mtd->writesize, | |
814 | + chip->oob_poi, mtd->oobsize); | |
815 | + | |
816 | + /* setting up DMA */ | |
817 | + irq_status = denali_dma_configuration(denali, DENALI_WRITE, raw_xfer, | |
818 | + irq_mask, oob_required); | |
819 | + | |
820 | + /* if timeout happen, error out */ | |
821 | + if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) { | |
822 | + debug("DMA timeout for denali write_page\n"); | |
823 | + denali->status = NAND_STATUS_FAIL; | |
824 | + return -EIO; | |
825 | + } | |
826 | + | |
827 | + if (irq_status & INTR_STATUS__LOCKED_BLK) { | |
828 | + debug("Failed as write to locked block\n"); | |
829 | + denali->status = NAND_STATUS_FAIL; | |
830 | + return -EIO; | |
831 | + } | |
832 | + return 0; | |
833 | +} | |
834 | + | |
835 | +/* NAND core entry points */ | |
836 | + | |
837 | +/* | |
838 | + * this is the callback that the NAND core calls to write a page. Since | |
839 | + * writing a page with ECC or without is similar, all the work is done | |
840 | + * by write_page above. | |
841 | + */ | |
842 | +static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |
843 | + const uint8_t *buf, int oob_required) | |
844 | +{ | |
845 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
846 | + | |
847 | + /* | |
848 | + * for regular page writes, we let HW handle all the ECC | |
849 | + * data written to the device. | |
850 | + */ | |
851 | + if (oob_required) | |
852 | + /* switch to main + spare access */ | |
853 | + denali_mode_main_spare_access(denali); | |
854 | + else | |
855 | + /* switch to main access only */ | |
856 | + denali_mode_main_access(denali); | |
857 | + | |
858 | + return write_page(mtd, chip, buf, false, oob_required); | |
859 | +} | |
860 | + | |
861 | +/* | |
862 | + * This is the callback that the NAND core calls to write a page without ECC. | |
863 | + * raw access is similar to ECC page writes, so all the work is done in the | |
864 | + * write_page() function above. | |
865 | + */ | |
866 | +static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | |
867 | + const uint8_t *buf, int oob_required) | |
868 | +{ | |
869 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
870 | + | |
871 | + /* | |
872 | + * for raw page writes, we want to disable ECC and simply write | |
873 | + * whatever data is in the buffer. | |
874 | + */ | |
875 | + | |
876 | + if (oob_required) | |
877 | + /* switch to main + spare access */ | |
878 | + denali_mode_main_spare_access(denali); | |
879 | + else | |
880 | + /* switch to main access only */ | |
881 | + denali_mode_main_access(denali); | |
882 | + | |
883 | + return write_page(mtd, chip, buf, true, oob_required); | |
884 | +} | |
885 | + | |
886 | +static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, | |
887 | + int page) | |
888 | +{ | |
889 | + return write_oob_data(mtd, chip->oob_poi, page); | |
890 | +} | |
891 | + | |
892 | +/* raw include ECC value and all the spare area */ | |
893 | +static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | |
894 | + uint8_t *buf, int oob_required, int page) | |
895 | +{ | |
896 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
897 | + | |
898 | + uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP; | |
899 | + | |
900 | + if (denali->page != page) { | |
901 | + debug("Missing NAND_CMD_READ0 command\n"); | |
902 | + return -EIO; | |
903 | + } | |
904 | + | |
905 | + if (oob_required) | |
906 | + /* switch to main + spare access */ | |
907 | + denali_mode_main_spare_access(denali); | |
908 | + else | |
909 | + /* switch to main access only */ | |
910 | + denali_mode_main_access(denali); | |
911 | + | |
912 | + /* setting up the DMA where ecc_enable is false */ | |
913 | + irq_status = denali_dma_configuration(denali, DENALI_READ, true, | |
914 | + irq_mask, oob_required); | |
915 | + | |
916 | + /* if timeout happen, error out */ | |
917 | + if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) { | |
918 | + debug("DMA timeout for denali_read_page_raw\n"); | |
919 | + return -EIO; | |
920 | + } | |
921 | + | |
922 | + /* splitting the content to destination buffer holder */ | |
923 | + memcpy(chip->oob_poi, (denali->buf.dma_buf + mtd->writesize), | |
924 | + mtd->oobsize); | |
925 | + memcpy(buf, denali->buf.dma_buf, mtd->writesize); | |
926 | + | |
927 | + return 0; | |
928 | +} | |
929 | + | |
930 | +static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, | |
931 | + uint8_t *buf, int oob_required, int page) | |
932 | +{ | |
933 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
934 | + uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP; | |
935 | + | |
936 | + if (denali->page != page) { | |
937 | + debug("Missing NAND_CMD_READ0 command\n"); | |
938 | + return -EIO; | |
939 | + } | |
940 | + | |
941 | + if (oob_required) | |
942 | + /* switch to main + spare access */ | |
943 | + denali_mode_main_spare_access(denali); | |
944 | + else | |
945 | + /* switch to main access only */ | |
946 | + denali_mode_main_access(denali); | |
947 | + | |
948 | + /* setting up the DMA where ecc_enable is true */ | |
949 | + irq_status = denali_dma_configuration(denali, DENALI_READ, false, | |
950 | + irq_mask, oob_required); | |
951 | + | |
952 | + memcpy(buf, denali->buf.dma_buf, mtd->writesize); | |
953 | + | |
954 | + /* check whether any ECC error */ | |
955 | + if (irq_status & INTR_STATUS__ECC_UNCOR_ERR) { | |
956 | + /* is the ECC cause by erase page, check using read_page_raw */ | |
957 | + debug(" Uncorrected ECC detected\n"); | |
958 | + denali_read_page_raw(mtd, chip, buf, oob_required, | |
959 | + denali->page); | |
960 | + | |
961 | + if (is_erased(buf, mtd->writesize) == true && | |
962 | + is_erased(chip->oob_poi, mtd->oobsize) == true) { | |
963 | + debug(" ECC error cause by erased block\n"); | |
964 | + /* false alarm, return the 0xFF */ | |
965 | + } else { | |
966 | + return -EIO; | |
967 | + } | |
968 | + } | |
969 | + memcpy(buf, denali->buf.dma_buf, mtd->writesize); | |
970 | + return 0; | |
971 | +} | |
972 | + | |
973 | +static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, | |
974 | + int page) | |
975 | +{ | |
976 | + read_oob_data(mtd, chip->oob_poi, page); | |
977 | + | |
978 | + return 0; | |
979 | +} | |
980 | + | |
981 | +static uint8_t denali_read_byte(struct mtd_info *mtd) | |
982 | +{ | |
983 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
984 | + uint32_t addr, result; | |
985 | + | |
986 | + addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); | |
987 | + index_addr_read_data(denali, addr | 2, &result); | |
988 | + return (uint8_t)result & 0xFF; | |
989 | +} | |
990 | + | |
991 | +static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |
992 | +{ | |
993 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
994 | + uint32_t i, addr, result; | |
995 | + | |
996 | + /* delay for tR (data transfer from Flash array to data register) */ | |
997 | + udelay(25); | |
998 | + | |
999 | + /* ensure device completed else additional delay and polling */ | |
1000 | + wait_for_irq(denali, INTR_STATUS__INT_ACT); | |
1001 | + | |
1002 | + addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); | |
1003 | + for (i = 0; i < len; i++) { | |
1004 | + index_addr_read_data(denali, (uint32_t)addr | 2, &result); | |
1005 | + write_byte_to_buf(denali, result); | |
1006 | + } | |
1007 | + memcpy(buf, denali->buf.buf, len); | |
1008 | +} | |
1009 | + | |
1010 | +static void denali_select_chip(struct mtd_info *mtd, int chip) | |
1011 | +{ | |
1012 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
1013 | + | |
1014 | + denali->flash_bank = chip; | |
1015 | +} | |
1016 | + | |
1017 | +static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) | |
1018 | +{ | |
1019 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
1020 | + int status = denali->status; | |
1021 | + denali->status = 0; | |
1022 | + | |
1023 | + return status; | |
1024 | +} | |
1025 | + | |
1026 | +static void denali_erase(struct mtd_info *mtd, int page) | |
1027 | +{ | |
1028 | + struct denali_nand_info *denali = mtd_to_denali(mtd); | |
1029 | + uint32_t cmd, irq_status; | |
1030 | + | |
1031 | + /* clear interrupts */ | |
1032 | + clear_interrupts(denali); | |
1033 | + | |
1034 | + /* setup page read request for access type */ | |
1035 | + cmd = MODE_10 | BANK(denali->flash_bank) | page; | |
1036 | + index_addr(denali, cmd, 0x1); | |
1037 | + | |
1038 | + /* wait for erase to complete or failure to occur */ | |
1039 | + irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP | | |
1040 | + INTR_STATUS__ERASE_FAIL); | |
1041 | + | |
1042 | + if (irq_status & INTR_STATUS__ERASE_FAIL || | |
1043 | + irq_status & INTR_STATUS__LOCKED_BLK) | |
1044 | + denali->status = NAND_STATUS_FAIL; | |
1045 | + else | |
1046 | + denali->status = 0; | |
1047 | +} | |
1048 | + | |
/*
 * Dispatch a raw NAND command from the core.  Most commands are
 * translated into MODE_10/MODE_11 indexed register accesses;
 * READ0/SEQIN only latch the page number for the later page
 * read/write callbacks, and PAGEPROG/ERASE2 are no-ops because the
 * work is done elsewhere.
 */
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
			   int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr;

	switch (cmd) {
	case NAND_CMD_PAGEPROG:
		/* programming is handled entirely by the write-page path */
		break;
	case NAND_CMD_STATUS:
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, cmd);
		break;
	case NAND_CMD_PARAM:
		clear_interrupts(denali);
		/* fall through - PARAM is then issued exactly like READID */
	case NAND_CMD_READID:
		reset_buf(denali);
		/* sometimes ManufactureId read from register is not right
		 * e.g. some of Micron MT29F32G08QAA MLC NAND chips
		 * So here we send READID cmd to NAND insteand
		 * */
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, cmd);
		index_addr(denali, addr | 1, col & 0xFF);
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_SEQIN:
		/* just remember the page; the DMA transfer happens later */
		denali->page = page;
		break;
	case NAND_CMD_RESET:
		reset_bank(denali);
		break;
	case NAND_CMD_READOOB:
		/* TODO: Read OOB data */
		break;
	case NAND_CMD_ERASE1:
		/*
		 * supporting block erase only, not multiblock erase as
		 * it will cross plane and software need complex calculation
		 * to identify the block count for the cross plane
		 */
		denali_erase(mtd, page);
		break;
	case NAND_CMD_ERASE2:
		/* nothing to do here as it was done during NAND_CMD_ERASE1 */
		break;
	case NAND_CMD_UNLOCK1:
		addr = MODE_10 | BANK(denali->flash_bank) | page;
		index_addr(denali, addr | 0, DENALI_UNLOCK_START);
		break;
	case NAND_CMD_UNLOCK2:
		addr = MODE_10 | BANK(denali->flash_bank) | page;
		index_addr(denali, addr | 0, DENALI_UNLOCK_END);
		break;
	case NAND_CMD_LOCK:
		addr = MODE_10 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, DENALI_LOCK);
		break;
	default:
		printf(": unsupported command received 0x%x\n", cmd);
		break;
	}
}
1112 | +/* end NAND core entry points */ | |
1113 | + | |
/* Initialization code to bring the device up to a known good state */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * tell driver how many bit controller will skip before writing
	 * ECC code in OOB. This is normally used for bad block marker
	 */
	writel(CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES,
	       denali->flash_reg + SPARE_AREA_SKIP_BYTES);
	detect_max_banks(denali);
	denali_nand_reset(denali);
	/* 0x0F = RB_PIN_ENABLED__BANK0..3: ready/busy pin on for all banks */
	writel(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	writel(CHIP_EN_DONT_CARE__FLAG,
	       denali->flash_reg + CHIP_ENABLE_DONT_CARE);
	writel(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	writel(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	writel(1, denali->flash_reg + ECC_ENABLE);
	denali_nand_timing_set(denali);
	denali_irq_init(denali);
}
1136 | + | |
1137 | +static struct nand_ecclayout nand_oob; | |
1138 | + | |
1139 | +static int denali_nand_init(struct nand_chip *nand) | |
1140 | +{ | |
1141 | + struct denali_nand_info *denali; | |
1142 | + | |
1143 | + denali = malloc(sizeof(*denali)); | |
1144 | + if (!denali) | |
1145 | + return -ENOMEM; | |
1146 | + | |
1147 | + nand->priv = denali; | |
1148 | + | |
1149 | + denali->flash_reg = (void __iomem *)CONFIG_SYS_NAND_REGS_BASE; | |
1150 | + denali->flash_mem = (void __iomem *)CONFIG_SYS_NAND_DATA_BASE; | |
1151 | + | |
1152 | +#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT | |
1153 | + /* check whether flash got BBT table (located at end of flash). As we | |
1154 | + * use NAND_BBT_NO_OOB, the BBT page will start with | |
1155 | + * bbt_pattern. We will have mirror pattern too */ | |
1156 | + nand->bbt_options |= NAND_BBT_USE_FLASH; | |
1157 | + /* | |
1158 | + * We are using main + spare with ECC support. As BBT need ECC support, | |
1159 | + * we need to ensure BBT code don't write to OOB for the BBT pattern. | |
1160 | + * All BBT info will be stored into data area with ECC support. | |
1161 | + */ | |
1162 | + nand->bbt_options |= NAND_BBT_NO_OOB; | |
1163 | +#endif | |
1164 | + | |
1165 | + nand->ecc.mode = NAND_ECC_HW; | |
1166 | + nand->ecc.size = CONFIG_NAND_DENALI_ECC_SIZE; | |
1167 | + nand->ecc.read_oob = denali_read_oob; | |
1168 | + nand->ecc.write_oob = denali_write_oob; | |
1169 | + nand->ecc.read_page = denali_read_page; | |
1170 | + nand->ecc.read_page_raw = denali_read_page_raw; | |
1171 | + nand->ecc.write_page = denali_write_page; | |
1172 | + nand->ecc.write_page_raw = denali_write_page_raw; | |
1173 | + /* | |
1174 | + * Tell driver the ecc strength. This register may be already set | |
1175 | + * correctly. So we read this value out. | |
1176 | + */ | |
1177 | + nand->ecc.strength = readl(denali->flash_reg + ECC_CORRECTION); | |
1178 | + switch (nand->ecc.size) { | |
1179 | + case 512: | |
1180 | + nand->ecc.bytes = (nand->ecc.strength * 13 + 15) / 16 * 2; | |
1181 | + break; | |
1182 | + case 1024: | |
1183 | + nand->ecc.bytes = (nand->ecc.strength * 14 + 15) / 16 * 2; | |
1184 | + break; | |
1185 | + default: | |
1186 | + pr_err("Unsupported ECC size\n"); | |
1187 | + return -EINVAL; | |
1188 | + } | |
1189 | + nand_oob.eccbytes = nand->ecc.bytes; | |
1190 | + nand->ecc.layout = &nand_oob; | |
1191 | + | |
1192 | + /* Set address of hardware control function */ | |
1193 | + nand->cmdfunc = denali_cmdfunc; | |
1194 | + nand->read_byte = denali_read_byte; | |
1195 | + nand->read_buf = denali_read_buf; | |
1196 | + nand->select_chip = denali_select_chip; | |
1197 | + nand->waitfunc = denali_waitfunc; | |
1198 | + denali_hw_init(denali); | |
1199 | + return 0; | |
1200 | +} | |
1201 | + | |
/* U-Boot board hook: bind the Denali driver to this nand_chip. */
int board_nand_init(struct nand_chip *chip)
{
	return denali_nand_init(chip);
}
1 | +/* | |
2 | + * Copyright (C) 2013-2014 Altera Corporation <www.altera.com> | |
3 | + * Copyright (C) 2009-2010, Intel Corporation and its suppliers. | |
4 | + * | |
5 | + * SPDX-License-Identifier: GPL-2.0+ | |
6 | + */ | |
7 | + | |
8 | +#include <linux/mtd/nand.h> | |
9 | + | |
10 | +#define DEVICE_RESET 0x0 | |
11 | +#define DEVICE_RESET__BANK0 0x0001 | |
12 | +#define DEVICE_RESET__BANK1 0x0002 | |
13 | +#define DEVICE_RESET__BANK2 0x0004 | |
14 | +#define DEVICE_RESET__BANK3 0x0008 | |
15 | + | |
16 | +#define TRANSFER_SPARE_REG 0x10 | |
17 | +#define TRANSFER_SPARE_REG__FLAG 0x0001 | |
18 | + | |
19 | +#define LOAD_WAIT_CNT 0x20 | |
20 | +#define LOAD_WAIT_CNT__VALUE 0xffff | |
21 | + | |
22 | +#define PROGRAM_WAIT_CNT 0x30 | |
23 | +#define PROGRAM_WAIT_CNT__VALUE 0xffff | |
24 | + | |
25 | +#define ERASE_WAIT_CNT 0x40 | |
26 | +#define ERASE_WAIT_CNT__VALUE 0xffff | |
27 | + | |
28 | +#define INT_MON_CYCCNT 0x50 | |
29 | +#define INT_MON_CYCCNT__VALUE 0xffff | |
30 | + | |
31 | +#define RB_PIN_ENABLED 0x60 | |
32 | +#define RB_PIN_ENABLED__BANK0 0x0001 | |
33 | +#define RB_PIN_ENABLED__BANK1 0x0002 | |
34 | +#define RB_PIN_ENABLED__BANK2 0x0004 | |
35 | +#define RB_PIN_ENABLED__BANK3 0x0008 | |
36 | + | |
37 | +#define MULTIPLANE_OPERATION 0x70 | |
38 | +#define MULTIPLANE_OPERATION__FLAG 0x0001 | |
39 | + | |
40 | +#define MULTIPLANE_READ_ENABLE 0x80 | |
41 | +#define MULTIPLANE_READ_ENABLE__FLAG 0x0001 | |
42 | + | |
43 | +#define COPYBACK_DISABLE 0x90 | |
44 | +#define COPYBACK_DISABLE__FLAG 0x0001 | |
45 | + | |
46 | +#define CACHE_WRITE_ENABLE 0xa0 | |
47 | +#define CACHE_WRITE_ENABLE__FLAG 0x0001 | |
48 | + | |
49 | +#define CACHE_READ_ENABLE 0xb0 | |
50 | +#define CACHE_READ_ENABLE__FLAG 0x0001 | |
51 | + | |
52 | +#define PREFETCH_MODE 0xc0 | |
53 | +#define PREFETCH_MODE__PREFETCH_EN 0x0001 | |
54 | +#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0 | |
55 | + | |
56 | +#define CHIP_ENABLE_DONT_CARE 0xd0 | |
57 | +#define CHIP_EN_DONT_CARE__FLAG 0x01 | |
58 | + | |
59 | +#define ECC_ENABLE 0xe0 | |
60 | +#define ECC_ENABLE__FLAG 0x0001 | |
61 | + | |
62 | +#define GLOBAL_INT_ENABLE 0xf0 | |
63 | +#define GLOBAL_INT_EN_FLAG 0x01 | |
64 | + | |
65 | +#define WE_2_RE 0x100 | |
66 | +#define WE_2_RE__VALUE 0x003f | |
67 | + | |
68 | +#define ADDR_2_DATA 0x110 | |
69 | +#define ADDR_2_DATA__VALUE 0x003f | |
70 | + | |
71 | +#define RE_2_WE 0x120 | |
72 | +#define RE_2_WE__VALUE 0x003f | |
73 | + | |
74 | +#define ACC_CLKS 0x130 | |
75 | +#define ACC_CLKS__VALUE 0x000f | |
76 | + | |
77 | +#define NUMBER_OF_PLANES 0x140 | |
78 | +#define NUMBER_OF_PLANES__VALUE 0x0007 | |
79 | + | |
80 | +#define PAGES_PER_BLOCK 0x150 | |
81 | +#define PAGES_PER_BLOCK__VALUE 0xffff | |
82 | + | |
83 | +#define DEVICE_WIDTH 0x160 | |
84 | +#define DEVICE_WIDTH__VALUE 0x0003 | |
85 | + | |
86 | +#define DEVICE_MAIN_AREA_SIZE 0x170 | |
87 | +#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff | |
88 | + | |
89 | +#define DEVICE_SPARE_AREA_SIZE 0x180 | |
90 | +#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff | |
91 | + | |
92 | +#define TWO_ROW_ADDR_CYCLES 0x190 | |
93 | +#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001 | |
94 | + | |
95 | +#define MULTIPLANE_ADDR_RESTRICT 0x1a0 | |
96 | +#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001 | |
97 | + | |
98 | +#define ECC_CORRECTION 0x1b0 | |
99 | +#define ECC_CORRECTION__VALUE 0x001f | |
100 | + | |
101 | +#define READ_MODE 0x1c0 | |
102 | +#define READ_MODE__VALUE 0x000f | |
103 | + | |
104 | +#define WRITE_MODE 0x1d0 | |
105 | +#define WRITE_MODE__VALUE 0x000f | |
106 | + | |
107 | +#define COPYBACK_MODE 0x1e0 | |
108 | +#define COPYBACK_MODE__VALUE 0x000f | |
109 | + | |
110 | +#define RDWR_EN_LO_CNT 0x1f0 | |
111 | +#define RDWR_EN_LO_CNT__VALUE 0x001f | |
112 | + | |
113 | +#define RDWR_EN_HI_CNT 0x200 | |
114 | +#define RDWR_EN_HI_CNT__VALUE 0x001f | |
115 | + | |
116 | +#define MAX_RD_DELAY 0x210 | |
117 | +#define MAX_RD_DELAY__VALUE 0x000f | |
118 | + | |
119 | +#define CS_SETUP_CNT 0x220 | |
120 | +#define CS_SETUP_CNT__VALUE 0x001f | |
121 | + | |
122 | +#define SPARE_AREA_SKIP_BYTES 0x230 | |
123 | +#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f | |
124 | + | |
125 | +#define SPARE_AREA_MARKER 0x240 | |
126 | +#define SPARE_AREA_MARKER__VALUE 0xffff | |
127 | + | |
128 | +#define DEVICES_CONNECTED 0x250 | |
129 | +#define DEVICES_CONNECTED__VALUE 0x0007 | |
130 | + | |
131 | +#define DIE_MASK 0x260 | |
132 | +#define DIE_MASK__VALUE 0x00ff | |
133 | + | |
134 | +#define FIRST_BLOCK_OF_NEXT_PLANE 0x270 | |
135 | +#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff | |
136 | + | |
137 | +#define WRITE_PROTECT 0x280 | |
138 | +#define WRITE_PROTECT__FLAG 0x0001 | |
139 | + | |
140 | +#define RE_2_RE 0x290 | |
141 | +#define RE_2_RE__VALUE 0x003f | |
142 | + | |
143 | +#define MANUFACTURER_ID 0x300 | |
144 | +#define MANUFACTURER_ID__VALUE 0x00ff | |
145 | + | |
146 | +#define DEVICE_ID 0x310 | |
147 | +#define DEVICE_ID__VALUE 0x00ff | |
148 | + | |
149 | +#define DEVICE_PARAM_0 0x320 | |
150 | +#define DEVICE_PARAM_0__VALUE 0x00ff | |
151 | + | |
152 | +#define DEVICE_PARAM_1 0x330 | |
153 | +#define DEVICE_PARAM_1__VALUE 0x00ff | |
154 | + | |
155 | +#define DEVICE_PARAM_2 0x340 | |
156 | +#define DEVICE_PARAM_2__VALUE 0x00ff | |
157 | + | |
158 | +#define LOGICAL_PAGE_DATA_SIZE 0x350 | |
159 | +#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff | |
160 | + | |
161 | +#define LOGICAL_PAGE_SPARE_SIZE 0x360 | |
162 | +#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff | |
163 | + | |
164 | +#define REVISION 0x370 | |
165 | +#define REVISION__VALUE 0xffff | |
166 | + | |
167 | +#define ONFI_DEVICE_FEATURES 0x380 | |
168 | +#define ONFI_DEVICE_FEATURES__VALUE 0x003f | |
169 | + | |
170 | +#define ONFI_OPTIONAL_COMMANDS 0x390 | |
171 | +#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f | |
172 | + | |
173 | +#define ONFI_TIMING_MODE 0x3a0 | |
174 | +#define ONFI_TIMING_MODE__VALUE 0x003f | |
175 | + | |
176 | +#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0 | |
177 | +#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f | |
178 | + | |
179 | +#define ONFI_DEVICE_NO_OF_LUNS 0x3c0 | |
180 | +#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff | |
181 | +#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100 | |
182 | + | |
183 | +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0 | |
184 | +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff | |
185 | + | |
186 | +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0 | |
187 | +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff | |
188 | + | |
189 | +#define FEATURES 0x3f0 | |
190 | +#define FEATURES__N_BANKS 0x0003 | |
191 | +#define FEATURES__ECC_MAX_ERR 0x003c | |
192 | +#define FEATURES__DMA 0x0040 | |
193 | +#define FEATURES__CMD_DMA 0x0080 | |
194 | +#define FEATURES__PARTITION 0x0100 | |
195 | +#define FEATURES__XDMA_SIDEBAND 0x0200 | |
196 | +#define FEATURES__GPREG 0x0400 | |
197 | +#define FEATURES__INDEX_ADDR 0x0800 | |
198 | + | |
199 | +#define TRANSFER_MODE 0x400 | |
200 | +#define TRANSFER_MODE__VALUE 0x0003 | |
201 | + | |
202 | +#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50)) | |
203 | +#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50)) | |
204 | + | |
205 | +/* | |
206 | + * Some versions of the IP have the ECC fixup handled in hardware. In this | |
207 | + * configuration we only get interrupted when the error is uncorrectable. | |
208 | + * Unfortunately this bit replaces INTR_STATUS__ECC_TRANSACTION_DONE from the | |
209 | + * old IP. | |
210 | + */ | |
211 | +#define INTR_STATUS__ECC_UNCOR_ERR 0x0001 | |
212 | +#define INTR_STATUS__ECC_TRANSACTION_DONE 0x0001 | |
213 | +#define INTR_STATUS__ECC_ERR 0x0002 | |
214 | +#define INTR_STATUS__DMA_CMD_COMP 0x0004 | |
215 | +#define INTR_STATUS__TIME_OUT 0x0008 | |
216 | +#define INTR_STATUS__PROGRAM_FAIL 0x0010 | |
217 | +#define INTR_STATUS__ERASE_FAIL 0x0020 | |
218 | +#define INTR_STATUS__LOAD_COMP 0x0040 | |
219 | +#define INTR_STATUS__PROGRAM_COMP 0x0080 | |
220 | +#define INTR_STATUS__ERASE_COMP 0x0100 | |
221 | +#define INTR_STATUS__PIPE_CPYBCK_CMD_COMP 0x0200 | |
222 | +#define INTR_STATUS__LOCKED_BLK 0x0400 | |
223 | +#define INTR_STATUS__UNSUP_CMD 0x0800 | |
224 | +#define INTR_STATUS__INT_ACT 0x1000 | |
225 | +#define INTR_STATUS__RST_COMP 0x2000 | |
226 | +#define INTR_STATUS__PIPE_CMD_ERR 0x4000 | |
227 | +#define INTR_STATUS__PAGE_XFER_INC 0x8000 | |
228 | + | |
229 | +#define INTR_EN__ECC_TRANSACTION_DONE 0x0001 | |
230 | +#define INTR_EN__ECC_ERR 0x0002 | |
231 | +#define INTR_EN__DMA_CMD_COMP 0x0004 | |
232 | +#define INTR_EN__TIME_OUT 0x0008 | |
233 | +#define INTR_EN__PROGRAM_FAIL 0x0010 | |
234 | +#define INTR_EN__ERASE_FAIL 0x0020 | |
235 | +#define INTR_EN__LOAD_COMP 0x0040 | |
236 | +#define INTR_EN__PROGRAM_COMP 0x0080 | |
237 | +#define INTR_EN__ERASE_COMP 0x0100 | |
238 | +#define INTR_EN__PIPE_CPYBCK_CMD_COMP 0x0200 | |
239 | +#define INTR_EN__LOCKED_BLK 0x0400 | |
240 | +#define INTR_EN__UNSUP_CMD 0x0800 | |
241 | +#define INTR_EN__INT_ACT 0x1000 | |
242 | +#define INTR_EN__RST_COMP 0x2000 | |
243 | +#define INTR_EN__PIPE_CMD_ERR 0x4000 | |
244 | +#define INTR_EN__PAGE_XFER_INC 0x8000 | |
245 | + | |
246 | +#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50)) | |
247 | +#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50)) | |
248 | +#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50)) | |
249 | + | |
250 | +#define DATA_INTR 0x550 | |
251 | +#define DATA_INTR__WRITE_SPACE_AV 0x0001 | |
252 | +#define DATA_INTR__READ_DATA_AV 0x0002 | |
253 | + | |
254 | +#define DATA_INTR_EN 0x560 | |
255 | +#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001 | |
256 | +#define DATA_INTR_EN__READ_DATA_AV 0x0002 | |
257 | + | |
258 | +#define GPREG_0 0x570 | |
259 | +#define GPREG_0__VALUE 0xffff | |
260 | + | |
261 | +#define GPREG_1 0x580 | |
262 | +#define GPREG_1__VALUE 0xffff | |
263 | + | |
264 | +#define GPREG_2 0x590 | |
265 | +#define GPREG_2__VALUE 0xffff | |
266 | + | |
267 | +#define GPREG_3 0x5a0 | |
268 | +#define GPREG_3__VALUE 0xffff | |
269 | + | |
270 | +#define ECC_THRESHOLD 0x600 | |
271 | +#define ECC_THRESHOLD__VALUE 0x03ff | |
272 | + | |
273 | +#define ECC_ERROR_BLOCK_ADDRESS 0x610 | |
274 | +#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff | |
275 | + | |
276 | +#define ECC_ERROR_PAGE_ADDRESS 0x620 | |
277 | +#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff | |
278 | +#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000 | |
279 | + | |
280 | +#define ECC_ERROR_ADDRESS 0x630 | |
281 | +#define ECC_ERROR_ADDRESS__OFFSET 0x0fff | |
282 | +#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000 | |
283 | + | |
284 | +#define ERR_CORRECTION_INFO 0x640 | |
285 | +#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff | |
286 | +#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00 | |
287 | +#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000 | |
288 | +#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000 | |
289 | + | |
290 | +#define DMA_ENABLE 0x700 | |
291 | +#define DMA_ENABLE__FLAG 0x0001 | |
292 | + | |
293 | +#define IGNORE_ECC_DONE 0x710 | |
294 | +#define IGNORE_ECC_DONE__FLAG 0x0001 | |
295 | + | |
296 | +#define DMA_INTR 0x720 | |
297 | +#define DMA_INTR__TARGET_ERROR 0x0001 | |
298 | +#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002 | |
299 | +#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004 | |
300 | +#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008 | |
301 | +#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010 | |
302 | +#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020 | |
303 | + | |
304 | +#define DMA_INTR_EN 0x730 | |
305 | +#define DMA_INTR_EN__TARGET_ERROR 0x0001 | |
306 | +#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002 | |
307 | +#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004 | |
308 | +#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008 | |
309 | +#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010 | |
310 | +#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020 | |
311 | + | |
312 | +#define TARGET_ERR_ADDR_LO 0x740 | |
313 | +#define TARGET_ERR_ADDR_LO__VALUE 0xffff | |
314 | + | |
315 | +#define TARGET_ERR_ADDR_HI 0x750 | |
316 | +#define TARGET_ERR_ADDR_HI__VALUE 0xffff | |
317 | + | |
318 | +#define CHNL_ACTIVE 0x760 | |
319 | +#define CHNL_ACTIVE__CHANNEL0 0x0001 | |
320 | +#define CHNL_ACTIVE__CHANNEL1 0x0002 | |
321 | +#define CHNL_ACTIVE__CHANNEL2 0x0004 | |
322 | +#define CHNL_ACTIVE__CHANNEL3 0x0008 | |
323 | + | |
324 | +#define ACTIVE_SRC_ID 0x800 | |
325 | +#define ACTIVE_SRC_ID__VALUE 0x00ff | |
326 | + | |
327 | +#define PTN_INTR 0x810 | |
328 | +#define PTN_INTR__CONFIG_ERROR 0x0001 | |
329 | +#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002 | |
330 | +#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004 | |
331 | +#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008 | |
332 | +#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010 | |
333 | +#define PTN_INTR__REG_ACCESS_ERROR 0x0020 | |
334 | + | |
335 | +#define PTN_INTR_EN 0x820 | |
336 | +#define PTN_INTR_EN__CONFIG_ERROR 0x0001 | |
337 | +#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002 | |
338 | +#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004 | |
339 | +#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008 | |
340 | +#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010 | |
341 | +#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020 | |
342 | + | |
343 | +#define PERM_SRC_ID(__bank) (0x830 + ((__bank) * 0x40)) | |
344 | +#define PERM_SRC_ID__SRCID 0x00ff | |
345 | +#define PERM_SRC_ID__DIRECT_ACCESS_ACTIVE 0x0800 | |
346 | +#define PERM_SRC_ID__WRITE_ACTIVE 0x2000 | |
347 | +#define PERM_SRC_ID__READ_ACTIVE 0x4000 | |
348 | +#define PERM_SRC_ID__PARTITION_VALID 0x8000 | |
349 | + | |
350 | +#define MIN_BLK_ADDR(__bank) (0x840 + ((__bank) * 0x40)) | |
351 | +#define MIN_BLK_ADDR__VALUE 0xffff | |
352 | + | |
353 | +#define MAX_BLK_ADDR(__bank) (0x850 + ((__bank) * 0x40)) | |
354 | +#define MAX_BLK_ADDR__VALUE 0xffff | |
355 | + | |
356 | +#define MIN_MAX_BANK(__bank) (0x860 + ((__bank) * 0x40)) | |
357 | +#define MIN_MAX_BANK__MIN_VALUE 0x0003 | |
358 | +#define MIN_MAX_BANK__MAX_VALUE 0x000c | |
359 | + | |
360 | +/* lld.h */ | |
361 | +#define GOOD_BLOCK 0 | |
362 | +#define DEFECTIVE_BLOCK 1 | |
363 | +#define READ_ERROR 2 | |
364 | + | |
365 | +#define CLK_X 5 | |
366 | +#define CLK_MULTI 4 | |
367 | + | |
368 | +/* spectraswconfig.h */ | |
369 | +#define CMD_DMA 0 | |
370 | + | |
371 | +#define SPECTRA_PARTITION_ID 0 | |
372 | +/**** Block Table and Reserved Block Parameters *****/ | |
373 | +#define SPECTRA_START_BLOCK 3 | |
374 | +#define NUM_FREE_BLOCKS_GATE 30 | |
375 | + | |
376 | +/* KBV - Updated to LNW scratch register address */ | |
377 | +#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR | |
378 | +#define SCRATCH_REG_SIZE 64 | |
379 | + | |
380 | +#define GLOB_HWCTL_DEFAULT_BLKS 2048 | |
381 | + | |
382 | +#define CUSTOM_CONF_PARAMS 0 | |
383 | + | |
#ifndef _LLD_NAND_
#define _LLD_NAND_

/*
 * Indexed-addressing window offsets (within flash_mem): write the target
 * address/mode to the control register, then transfer data through the
 * data register.
 */
#define INDEX_CTRL_REG	0x0
#define INDEX_DATA_REG	0x10

/*
 * Access-mode selectors or-ed into addresses presented on the controller's
 * memory-mapped interface.
 * NOTE(review): per-mode semantics (data vs. command cycles) are defined by
 * the controller databook / denali.c usage — confirm there.
 */
#define MODE_00	0x00000000
#define MODE_01	0x04000000
#define MODE_10	0x08000000
#define MODE_11	0x0C000000


/*
 * Indices of the per-timing-mode device parameters; presumably used to
 * index an ONFI timing parameter table in denali.c — TODO confirm.
 */
#define DATA_TRANSFER_MODE	0
#define PROTECTION_PER_BLOCK	1
#define LOAD_WAIT_COUNT	2
#define PROGRAM_WAIT_COUNT	3
#define ERASE_WAIT_COUNT	4
#define INT_MONITOR_CYCLE_COUNT	5
#define READ_BUSY_PIN_ENABLED	6
#define MULTIPLANE_OPERATION_SUPPORT	7
#define PRE_FETCH_MODE	8
#define CE_DONT_CARE_SUPPORT	9
#define COPYBACK_SUPPORT	10
#define CACHE_WRITE_SUPPORT	11
#define CACHE_READ_SUPPORT	12
#define NUM_PAGES_IN_BLOCK	13
#define ECC_ENABLE_SELECT	14
#define WRITE_ENABLE_2_READ_ENABLE	15
#define ADDRESS_2_DATA	16
#define READ_ENABLE_2_WRITE_ENABLE	17
#define TWO_ROW_ADDRESS_CYCLES	18
#define MULTIPLANE_ADDRESS_RESTRICT	19
#define ACC_CLOCKS	20
#define READ_WRITE_ENABLE_LOW_COUNT	21
#define READ_WRITE_ENABLE_HIGH_COUNT	22

/* ECC is computed over 512-byte sectors */
#define ECC_SECTOR_SIZE	512

/* Large enough for one maximum-size page plus its OOB area */
#define DENALI_BUF_SIZE	(NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)

/*
 * Staging buffers for page transfers.  head/tail track the consume/produce
 * positions within buf when data is moved piecemeal.
 */
struct nand_buf {
	int head;	/* next read offset into buf */
	int tail;	/* next write offset into buf */
	/* separating dma_buf as buf can be used for status read purpose */
	uint8_t dma_buf[DENALI_BUF_SIZE] __aligned(64);	/* DMA target; 64-byte aligned */
	uint8_t buf[DENALI_BUF_SIZE];	/* CPU-side copy / PIO buffer */
};
431 | + | |
/* Values for denali_nand_info.platform: which host integration is in use */
#define INTEL_CE4100	1
#define INTEL_MRST	2
#define DT	3	/* probed via device tree */
435 | + | |
/* Per-controller driver state for the Denali NAND controller. */
struct denali_nand_info {
	struct mtd_info mtd;		/* MTD device exposed to the MTD core */
	struct nand_chip *nand;		/* raw NAND chip bound to this controller */

	int flash_bank; /* currently selected chip */
	int status;	/* NOTE(review): presumably last op status for waitfunc — confirm in denali.c */
	int platform;	/* one of INTEL_CE4100 / INTEL_MRST / DT */
	struct nand_buf buf;	/* page data staging buffers */
	struct device *dev;	/* retained from the Linux driver; may be unused in U-Boot */
	int total_used_banks;	/* number of banks with a chip detected */
	uint32_t block;  /* stored for future use */
	uint32_t page;	/* page address of the current operation */
	void __iomem *flash_reg;  /* Mapped io reg base address */
	void __iomem *flash_mem;  /* Mapped io mem (indexed data) base address */

	/* elements used by ISR */
	/*struct completion complete;*/	/* Linux leftover; U-Boot polls instead of sleeping */

	uint32_t irq_status;	/* accumulated interrupt status bits */
	int irq_debug_array[32];	/* debug trace of recent interrupt events */
	int idx;	/* write index into irq_debug_array */
	int irq;	/* interrupt line number (unused when polling) */

	uint32_t devnum;	/* represent how many nands connected */
	uint32_t fwblks;	/* represent how many blocks FW used */
	uint32_t totalblks;	/* total blocks across all connected devices */
	uint32_t blksperchip;	/* blocks per individual chip */
	uint32_t bbtskipbytes;	/* OOB bytes reserved for bad-block markers */
	uint32_t max_banks;	/* number of banks the controller supports */
};
466 | + | |
467 | +#endif /*_LLD_NAND_*/ |
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf
-
mentioned in commit 4b0abf