Commit 089d03629b04ebe8163905a2398742b426e35085
Committed by Haojian Zhuang
1 parent: 9450be76d0
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
ARM: pxa: add devicetree code for irq handling
Properly register on-chip interrupts using the irqdomain logic. The number of interrupts is taken from the devicetree node. That includes the following changes:

- cpu_has_ipr() was converted from an inline function to a static bool variable, so it can be set from the "marvell,intc-priority" property in the interrupt controller's device tree node.
- IRQ_BASE was converted from a macro to a runtime variable so that it can be initialized dynamically from the DT init code.
- irq_base() now uses pxa_irq_base and just adds an offset. Hence, no compile-time fixed values are used in the DT initialization case.

Signed-off-by: Daniel Mack <zonque@gmail.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Haojian Zhuang <haojian.zhuang@gmail.com>
Showing 2 changed files with 125 additions and 23 deletions
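For orientation, the DT init path added in arch/arm/mach-pxa/irq.c below matches a node by the "marvell,pxa-intc" compatible, maps its first reg region, reads "marvell,intc-nr-irqs", and enables IPR handling when "marvell,intc-priority" is present. A minimal sketch of such a node, not part of this commit — the node label, the reg length, and the standard interrupt-controller/#interrupt-cells properties are illustrative assumptions, while the 0x40d00000 base and the count 56 mirror values already in the code:

	pxairq: interrupt-controller@40d00000 {
		compatible = "marvell,pxa-intc";
		reg = <0x40d00000 0xd4>;	/* length 0xd4 is an assumed placeholder */
		interrupt-controller;
		#interrupt-cells = <1>;		/* one cell, as irq_domain_xlate_onecell expects */
		marvell,intc-nr-irqs = <56>;	/* same count pxa3xx_init_irq() passes today */
		marvell,intc-priority;		/* only on parts that have IPR registers */
	};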
arch/arm/mach-pxa/irq.c
@@ -17,6 +17,8 @@
 #include <linux/syscore_ops.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 #include <asm/exception.h>
 
@@ -25,8 +27,6 @@
 
 #include "generic.h"
 
-#define IRQ_BASE		io_p2v(0x40d00000)
-
 #define ICIP			(0x000)
 #define ICMR			(0x004)
 #define ICLR			(0x008)
@@ -48,22 +48,19 @@
  * This is for peripheral IRQs internal to the PXA chip.
  */
 
+static void __iomem *pxa_irq_base;
 static int pxa_internal_irq_nr;
+static bool cpu_has_ipr;
 
-static inline int cpu_has_ipr(void)
-{
-	return !cpu_is_pxa25x();
-}
-
 static inline void __iomem *irq_base(int i)
 {
-	static unsigned long phys_base[] = {
-		0x40d00000,
-		0x40d0009c,
-		0x40d00130,
+	static unsigned long phys_base_offset[] = {
+		0x0,
+		0x9c,
+		0x130,
 	};
 
-	return io_p2v(phys_base[i]);
+	return pxa_irq_base + phys_base_offset[i];
 }
 
 void pxa_mask_irq(struct irq_data *d)
@@ -96,8 +93,8 @@
 	uint32_t icip, icmr, mask;
 
 	do {
-		icip = __raw_readl(IRQ_BASE + ICIP);
-		icmr = __raw_readl(IRQ_BASE + ICMR);
+		icip = __raw_readl(pxa_irq_base + ICIP);
+		icmr = __raw_readl(pxa_irq_base + ICMR);
 		mask = icip & icmr;
 
 		if (mask == 0)
@@ -128,6 +125,8 @@
 	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
 
 	pxa_internal_irq_nr = irq_nr;
+	cpu_has_ipr = !cpu_is_pxa25x();
+	pxa_irq_base = io_p2v(0x40d00000);
 
 	for (n = 0; n < irq_nr; n += 32) {
 		void __iomem *base = irq_base(n >> 5);
@@ -136,8 +135,8 @@
 		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
 		for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
 			/* initialize interrupt priority */
-			if (cpu_has_ipr())
-				__raw_writel(i | IPR_VALID, IRQ_BASE + IPR(i));
+			if (cpu_has_ipr)
+				__raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
 
 			irq = PXA_IRQ(i);
 			irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
@@ -168,9 +167,9 @@
 		__raw_writel(0, base + ICMR);
 	}
 
-	if (cpu_has_ipr()) {
+	if (cpu_has_ipr) {
 		for (i = 0; i < pxa_internal_irq_nr; i++)
-			saved_ipr[i] = __raw_readl(IRQ_BASE + IPR(i));
+			saved_ipr[i] = __raw_readl(pxa_irq_base + IPR(i));
 	}
 
 	return 0;
@@ -187,11 +186,11 @@
 		__raw_writel(0, base + ICLR);
 	}
 
-	if (cpu_has_ipr())
+	if (cpu_has_ipr)
 		for (i = 0; i < pxa_internal_irq_nr; i++)
-			__raw_writel(saved_ipr[i], IRQ_BASE + IPR(i));
+			__raw_writel(saved_ipr[i], pxa_irq_base + IPR(i));
 
-	__raw_writel(1, IRQ_BASE + ICCR);
+	__raw_writel(1, pxa_irq_base + ICCR);
 }
 #else
 #define pxa_irq_suspend	NULL
@@ -202,4 +201,94 @@
 	.suspend	= pxa_irq_suspend,
 	.resume		= pxa_irq_resume,
 };
+
+#ifdef CONFIG_OF
+static struct irq_domain *pxa_irq_domain;
+
+static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
+		       irq_hw_number_t hw)
+{
+	void __iomem *base = irq_base(hw / 32);
+
+	/* initialize interrupt priority */
+	if (cpu_has_ipr)
+		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
+
+	irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(hw, base);
+	set_irq_flags(hw, IRQF_VALID);
+
+	return 0;
+}
+
+static struct irq_domain_ops pxa_irq_ops = {
+	.map	= pxa_irq_map,
+	.xlate	= irq_domain_xlate_onecell,
+};
+
+static const struct of_device_id intc_ids[] __initconst = {
+	{ .compatible = "marvell,pxa-intc", },
+	{}
+};
+
+void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
+{
+	struct device_node *node;
+	const struct of_device_id *of_id;
+	struct pxa_intc_conf *conf;
+	struct resource res;
+	int n, ret;
+
+	node = of_find_matching_node(NULL, intc_ids);
+	if (!node) {
+		pr_err("Failed to find interrupt controller in arch-pxa\n");
+		return;
+	}
+	of_id = of_match_node(intc_ids, node);
+	conf = of_id->data;
+
+	ret = of_property_read_u32(node, "marvell,intc-nr-irqs",
+				   &pxa_internal_irq_nr);
+	if (ret) {
+		pr_err("Not found marvell,intc-nr-irqs property\n");
+		return;
+	}
+
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret < 0) {
+		pr_err("No registers defined for node\n");
+		return;
+	}
+	pxa_irq_base = io_p2v(res.start);
+
+	if (of_find_property(node, "marvell,intc-priority", NULL))
+		cpu_has_ipr = 1;
+
+	ret = irq_alloc_descs(-1, 0, pxa_internal_irq_nr, 0);
+	if (ret < 0) {
+		pr_err("Failed to allocate IRQ numbers\n");
+		return;
+	}
+
+	pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
+					       &pxa_irq_ops, NULL);
+	if (!pxa_irq_domain)
+		panic("Unable to add PXA IRQ domain\n");
+
+	irq_set_default_host(pxa_irq_domain);
+
+	for (n = 0; n < pxa_internal_irq_nr; n += 32) {
+		void __iomem *base = irq_base(n >> 5);
+
+		__raw_writel(0, base + ICMR);	/* disable all IRQs */
+		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
+	}
+
+	/* only unmasked interrupts kick us out of idle */
+	__raw_writel(1, irq_base(0) + ICCR);
+
+	pxa_internal_irq_chip.irq_set_wake = fn;
+}
+#endif	/* CONFIG_OF */
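Since pxa_irq_ops uses irq_domain_xlate_onecell and the legacy domain starts at hwirq 0, a consumer would reference this controller with a one-cell interrupt specifier. A hypothetical peripheral node for illustration only — the compatible string, addresses, and IRQ number are assumptions, not taken from this commit:

	uart@40100000 {
		compatible = "mrvl,pxa-uart";	/* assumed example consumer */
		reg = <0x40100000 0x30>;
		interrupt-parent = <&pxairq>;	/* label from the sketch above */
		interrupts = <22>;		/* single cell = hwirq number */
	};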
arch/arm/mach-pxa/pxa3xx.c
@@ -40,6 +40,8 @@
 #define PECR_IE(n)	((1 << ((n) * 2)) << 28)
 #define PECR_IS(n)	((1 << ((n) * 2)) << 29)
 
+extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
+
 static DEFINE_PXA3_CKEN(pxa3xx_ffuart, FFUART, 14857000, 1);
 static DEFINE_PXA3_CKEN(pxa3xx_btuart, BTUART, 14857000, 1);
 static DEFINE_PXA3_CKEN(pxa3xx_stuart, STUART, 14857000, 1);
@@ -382,7 +384,7 @@
 	pxa_ext_wakeup_chip.irq_set_wake = fn;
 }
 
-void __init pxa3xx_init_irq(void)
+static void __init __pxa3xx_init_irq(void)
 {
 	/* enable CP6 access */
 	u32 value;
@@ -390,8 +392,19 @@
 	value |= (1 << 6);
 	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));
 
-	pxa_init_irq(56, pxa3xx_set_wake);
 	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
+}
+
+void __init pxa3xx_init_irq(void)
+{
+	__pxa3xx_init_irq();
+	pxa_init_irq(56, pxa3xx_set_wake);
+}
+
+void __init pxa3xx_dt_init_irq(void)
+{
+	__pxa3xx_init_irq();
+	pxa_dt_irq_init(pxa3xx_set_wake);
 }
 
 static struct map_desc pxa3xx_io_desc[] __initdata = {