Commit e2a8092c3fa9766248e9515252ae44e6df2d97a0
1 parent
dd8cb37b4e
Exists in
master
and in
7 other branches
Blackfin: bf537: fix excessive gpio int demuxing
The search logic in the gpio demux walks all possible gpio blocks starting at the specified pin. The trouble on bf537 parts is that when we demux the port F and port G mask A interrupts, we also demux the port H mask A interrupts. Most of the time this isn't an issue, as people don't usually use port H, but we might as well avoid it when possible.

Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Showing 1 changed file with 25 additions and 30 deletions Inline Diff
arch/blackfin/mach-common/ints-priority.c
1 | /* | 1 | /* |
2 | * Set up the interrupt priorities | 2 | * Set up the interrupt priorities |
3 | * | 3 | * |
4 | * Copyright 2004-2009 Analog Devices Inc. | 4 | * Copyright 2004-2009 Analog Devices Inc. |
5 | * 2003 Bas Vermeulen <bas@buyways.nl> | 5 | * 2003 Bas Vermeulen <bas@buyways.nl> |
6 | * 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca> | 6 | * 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca> |
7 | * 2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca> | 7 | * 2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca> |
8 | * 1999 D. Jeff Dionne <jeff@uclinux.org> | 8 | * 1999 D. Jeff Dionne <jeff@uclinux.org> |
9 | * 1996 Roman Zippel | 9 | * 1996 Roman Zippel |
10 | * | 10 | * |
11 | * Licensed under the GPL-2 | 11 | * Licensed under the GPL-2 |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/kernel_stat.h> | 15 | #include <linux/kernel_stat.h> |
16 | #include <linux/seq_file.h> | 16 | #include <linux/seq_file.h> |
17 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #ifdef CONFIG_IPIPE | 19 | #ifdef CONFIG_IPIPE |
20 | #include <linux/ipipe.h> | 20 | #include <linux/ipipe.h> |
21 | #endif | 21 | #endif |
22 | #include <asm/traps.h> | 22 | #include <asm/traps.h> |
23 | #include <asm/blackfin.h> | 23 | #include <asm/blackfin.h> |
24 | #include <asm/gpio.h> | 24 | #include <asm/gpio.h> |
25 | #include <asm/irq_handler.h> | 25 | #include <asm/irq_handler.h> |
26 | #include <asm/dpmc.h> | 26 | #include <asm/dpmc.h> |
27 | 27 | ||
/* Map a Linux irq number to its SIC (system interrupt controller) index.
 * System IRQs start right after the core timer interrupt, hence the
 * subtraction of (IRQ_CORETMR + 1).  The argument is parenthesized so
 * that non-trivial expressions (e.g. SIC_SYSIRQ(x ? a : b)) expand
 * correctly.
 */
#define SIC_SYSIRQ(irq)	((irq) - (IRQ_CORETMR + 1))
29 | 29 | ||
30 | /* | 30 | /* |
31 | * NOTES: | 31 | * NOTES: |
32 | * - we have separated the physical Hardware interrupt from the | 32 | * - we have separated the physical Hardware interrupt from the |
33 | * levels that the LINUX kernel sees (see the description in irq.h) | 33 | * levels that the LINUX kernel sees (see the description in irq.h) |
34 | * - | 34 | * - |
35 | */ | 35 | */ |
36 | 36 | ||
#ifndef CONFIG_SMP
/* Shadow copy of the core IMASK register (UP build only).
 *
 * Initialize this to an actual value to force it into the .data
 * section so that we know it is properly initialized at entry into
 * the kernel but before bss is initialized to zero (which is where
 * it would live otherwise).  The 0x1f magic represents the IRQs we
 * cannot actually mask out in hardware.
 */
unsigned long bfin_irq_flags = 0x1f;
EXPORT_SYMBOL(bfin_irq_flags);
#endif
47 | 47 | ||
#ifdef CONFIG_PM
/* Wakeup state saved/applied around suspend (see bfin_internal_set_wake). */
unsigned long bfin_sic_iwr[3];	/* Up to 3 SIC_IWRx registers */
unsigned vr_wakeup;		/* accumulated VR_CTL wakeup-enable bits */
#endif
52 | 52 | ||
/* One entry per peripheral interrupt, grouped by the IVG level it was
 * assigned to in the SIC_IAR registers (filled in by search_IAR()).
 */
static struct ivgx {
	/* irq number for request_irq, available in mach-bf5xx/irq.h */
	unsigned int irqno;
	/* corresponding bit in the SIC_ISR register */
	unsigned int isrflag;
} ivg_table[NR_PERI_INTS];

static struct ivg_slice {
	/* position of first irq in ivg_table for given ivg */
	struct ivgx *ifirst;
	/* one past the last entry for this ivg (advanced as entries are added) */
	struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];
65 | 65 | ||
66 | 66 | ||
/*
 * Search SIC_IAR and fill tables with the irqvalues
 * and their positions in the SIC_ISR register.
 *
 * Each SIC_IAR register packs 8 peripheral interrupts at 4 bits apiece;
 * the nibble value is the IVG level (relative to IVG7) the interrupt is
 * routed to.  We walk all peripheral interrupts once per IVG level and
 * append matches to ivg_table, recording the ivg7_13 slice boundaries.
 */
static void __init search_IAR(void)
{
	unsigned ivg, irq_pos = 0;
	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
		int irqN;

		/* Start this IVG's slice at the current fill position. */
		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];

		/* Read one IAR register (covers 4 irqs per 16-bit half) at a time. */
		for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
			int irqn;
			u32 iar = bfin_read32((unsigned long *)SIC_IAR0 +
#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
	defined(CONFIG_BF538) || defined(CONFIG_BF539)
			/* these parts have a register-layout gap: IAR banks for
			 * irqs >= 32 start at SIC_IAR4, not contiguously */
				((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
#else
				(irqN >> 3)
#endif
				);

			for (irqn = irqN; irqn < irqN + 4; ++irqn) {
				/* 4-bit IVG assignment per irq within the IAR word */
				int iar_shift = (irqn & 7) * 4;
				if (ivg == (0xf & (iar >> iar_shift))) {
					ivg_table[irq_pos].irqno = IVG7 + irqn;
					ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
					ivg7_13[ivg].istop++;
					irq_pos++;
				}
			}
		}
	}
}
102 | 102 | ||
103 | /* | 103 | /* |
104 | * This is for core internal IRQs | 104 | * This is for core internal IRQs |
105 | */ | 105 | */ |
106 | 106 | ||
/* No-op irq_ack callback for chips whose interrupts need no explicit ack. */
void bfin_ack_noop(struct irq_data *d)
{
	/* Dummy function. */
}
111 | 111 | ||
112 | static void bfin_core_mask_irq(struct irq_data *d) | 112 | static void bfin_core_mask_irq(struct irq_data *d) |
113 | { | 113 | { |
114 | bfin_irq_flags &= ~(1 << d->irq); | 114 | bfin_irq_flags &= ~(1 << d->irq); |
115 | if (!hard_irqs_disabled()) | 115 | if (!hard_irqs_disabled()) |
116 | hard_local_irq_enable(); | 116 | hard_local_irq_enable(); |
117 | } | 117 | } |
118 | 118 | ||
119 | static void bfin_core_unmask_irq(struct irq_data *d) | 119 | static void bfin_core_unmask_irq(struct irq_data *d) |
120 | { | 120 | { |
121 | bfin_irq_flags |= 1 << d->irq; | 121 | bfin_irq_flags |= 1 << d->irq; |
122 | /* | 122 | /* |
123 | * If interrupts are enabled, IMASK must contain the same value | 123 | * If interrupts are enabled, IMASK must contain the same value |
124 | * as bfin_irq_flags. Make sure that invariant holds. If interrupts | 124 | * as bfin_irq_flags. Make sure that invariant holds. If interrupts |
125 | * are currently disabled we need not do anything; one of the | 125 | * are currently disabled we need not do anything; one of the |
126 | * callers will take care of setting IMASK to the proper value | 126 | * callers will take care of setting IMASK to the proper value |
127 | * when reenabling interrupts. | 127 | * when reenabling interrupts. |
128 | * local_irq_enable just does "STI bfin_irq_flags", so it's exactly | 128 | * local_irq_enable just does "STI bfin_irq_flags", so it's exactly |
129 | * what we need. | 129 | * what we need. |
130 | */ | 130 | */ |
131 | if (!hard_irqs_disabled()) | 131 | if (!hard_irqs_disabled()) |
132 | hard_local_irq_enable(); | 132 | hard_local_irq_enable(); |
133 | return; | 133 | return; |
134 | } | 134 | } |
135 | 135 | ||
/* Mask a peripheral (SIC-routed) interrupt by clearing its SIC_IMASK bit.
 * Runs with hard interrupts disabled to make the read-modify-write atomic.
 */
void bfin_internal_mask_irq(unsigned int irq)
{
	unsigned long flags = hard_local_irq_save();

#ifdef SIC_IMASK0
	/* Parts with multiple IMASK banks: 32 interrupts per bank. */
	unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
	unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
			~(1 << mask_bit));
# ifdef CONFIG_SMP
	/* Mirror the change on the second core's SIC. */
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
			~(1 << mask_bit));
# endif
#else
	/* Single-bank parts. */
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
			~(1 << SIC_SYSIRQ(irq)));
#endif

	hard_local_irq_restore(flags);
}
156 | 156 | ||
157 | static void bfin_internal_mask_irq_chip(struct irq_data *d) | 157 | static void bfin_internal_mask_irq_chip(struct irq_data *d) |
158 | { | 158 | { |
159 | bfin_internal_mask_irq(d->irq); | 159 | bfin_internal_mask_irq(d->irq); |
160 | } | 160 | } |
161 | 161 | ||
/* Unmask a peripheral interrupt in SIC_IMASK.  On SMP the function takes
 * an affinity mask and only enables the interrupt on the core(s) present
 * in it (core 0 -> SIC, core 1 -> SICB); on UP it is the plain public
 * bfin_internal_unmask_irq().  Note the #ifdef swaps the whole signature.
 */
#ifdef CONFIG_SMP
static void bfin_internal_unmask_irq_affinity(unsigned int irq,
		const struct cpumask *affinity)
#else
void bfin_internal_unmask_irq(unsigned int irq)
#endif
{
	unsigned long flags = hard_local_irq_save();

#ifdef SIC_IMASK0
	unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
	unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
# ifdef CONFIG_SMP
	if (cpumask_test_cpu(0, affinity))
# endif
		bfin_write_SIC_IMASK(mask_bank,
			bfin_read_SIC_IMASK(mask_bank) |
			(1 << mask_bit));
# ifdef CONFIG_SMP
	if (cpumask_test_cpu(1, affinity))
		bfin_write_SICB_IMASK(mask_bank,
			bfin_read_SICB_IMASK(mask_bank) |
			(1 << mask_bit));
# endif
#else
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
			(1 << SIC_SYSIRQ(irq)));
#endif

	hard_local_irq_restore(flags);
}
193 | 193 | ||
#ifdef CONFIG_SMP
/* irq_chip .irq_unmask: honor the irq's current affinity. */
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
}

/* irq_chip .irq_set_affinity: re-route by masking everywhere, then
 * unmasking only on the cores in the new mask.
 */
static int bfin_internal_set_affinity(struct irq_data *d,
				      const struct cpumask *mask, bool force)
{
	bfin_internal_mask_irq(d->irq);
	bfin_internal_unmask_irq_affinity(d->irq, mask);

	return 0;
}
#else
/* UP variant: no affinity to consider. */
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq(d->irq);
}
#endif
214 | 214 | ||
#ifdef CONFIG_PM
/* Enable/disable an interrupt as a wakeup source.
 *
 * Updates the cached SIC_IWRx bit for the irq and, for the few irqs that
 * also need a VR_CTL wakeup-enable bit (RTC, CAN RX, USB, rotary counter),
 * updates vr_wakeup accordingly.  Always returns 0.
 */
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	u32 bank, bit, wakeup = 0;
	unsigned long flags;
	bank = SIC_SYSIRQ(irq) / 32;
	bit = SIC_SYSIRQ(irq) % 32;

	/* Pick the matching VR_CTL wakeup bit, when the part has one. */
	switch (irq) {
#ifdef IRQ_RTC
	case IRQ_RTC:
		wakeup |= WAKE;
		break;
#endif
#ifdef IRQ_CAN0_RX
	case IRQ_CAN0_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_CAN1_RX
	case IRQ_CAN1_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_USB_INT0
	case IRQ_USB_INT0:
		wakeup |= USBWE;
		break;
#endif
#ifdef CONFIG_BF54x
	case IRQ_CNT:
		wakeup |= ROTWE;
		break;
#endif
	default:
		break;
	}

	flags = hard_local_irq_save();

	if (state) {
		bfin_sic_iwr[bank] |= (1 << bit);
		vr_wakeup |= wakeup;

	} else {
		bfin_sic_iwr[bank] &= ~(1 << bit);
		vr_wakeup &= ~wakeup;
	}

	hard_local_irq_restore(flags);

	return 0;
}

/* irq_chip .irq_set_wake wrapper around bfin_internal_set_wake(). */
static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
{
	return bfin_internal_set_wake(d->irq, state);
}
#else
# define bfin_internal_set_wake_chip NULL
#endif
276 | 276 | ||
/* irq_chip for core (CEC) interrupts, masked via the IMASK shadow. */
static struct irq_chip bfin_core_irqchip = {
	.name = "CORE",
	.irq_ack = bfin_ack_noop,
	.irq_mask = bfin_core_mask_irq,
	.irq_unmask = bfin_core_unmask_irq,
};
283 | 283 | ||
/* irq_chip for peripheral (SIC-routed) interrupts. */
static struct irq_chip bfin_internal_irqchip = {
	.name = "INTN",
	.irq_ack = bfin_ack_noop,
	.irq_mask = bfin_internal_mask_irq_chip,
	.irq_unmask = bfin_internal_unmask_irq_chip,
	.irq_mask_ack = bfin_internal_mask_irq_chip,
	.irq_disable = bfin_internal_mask_irq_chip,
	.irq_enable = bfin_internal_unmask_irq_chip,
#ifdef CONFIG_SMP
	.irq_set_affinity = bfin_internal_set_affinity,
#endif
	.irq_set_wake = bfin_internal_set_wake_chip,
};
297 | 297 | ||
/* Dispatch a demuxed irq, going through I-pipe when it is enabled,
 * otherwise straight to the generic irq layer.
 */
void bfin_handle_irq(unsigned irq)
{
#ifdef CONFIG_IPIPE
	struct pt_regs regs;	/* Contents not used. */
	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, &regs);
	ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
	generic_handle_irq(irq);
#endif /* !CONFIG_IPIPE */
}
309 | 309 | ||
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
/* Bitmask (relative to IRQ_MAC_PHYINT) of currently-unmasked MAC status
 * interrupts; maintained by the mask/unmask callbacks below and consulted
 * by bfin_demux_mac_status_irq().
 */
static int mac_stat_int_mask;
312 | 312 | ||
/* Acknowledge a MAC status interrupt by clearing the relevant sticky
 * bits in the EMAC registers (each source has its own ack register).
 */
static void bfin_mac_status_ack_irq(unsigned int irq)
{
	switch (irq) {
	case IRQ_MAC_MMCINT:
		/* Clear only the enabled+pending MMC TX/RX counter irqs. */
		bfin_write_EMAC_MMC_TIRQS(
			bfin_read_EMAC_MMC_TIRQE() &
			bfin_read_EMAC_MMC_TIRQS());
		bfin_write_EMAC_MMC_RIRQS(
			bfin_read_EMAC_MMC_RIRQE() &
			bfin_read_EMAC_MMC_RIRQS());
		break;
	case IRQ_MAC_RXFSINT:
		bfin_write_EMAC_RX_STKY(
			bfin_read_EMAC_RX_IRQE() &
			bfin_read_EMAC_RX_STKY());
		break;
	case IRQ_MAC_TXFSINT:
		bfin_write_EMAC_TX_STKY(
			bfin_read_EMAC_TX_IRQE() &
			bfin_read_EMAC_TX_STKY());
		break;
	case IRQ_MAC_WAKEDET:
		bfin_write_EMAC_WKUP_CTL(
			bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
		break;
	default:
		/* These bits are W1C */
		bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
		break;
	}
}
344 | 344 | ||
/* Mask one MAC status sub-interrupt.  On BF537-family parts only the PHY
 * interrupt has a dedicated enable bit (PHYIE); on other parts the whole
 * IRQ_MAC_ERROR line is masked once no sub-interrupt remains enabled.
 */
static void bfin_mac_status_mask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
#ifdef BF537_FAMILY
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
		break;
	default:
		break;
	}
#else
	if (!mac_stat_int_mask)
		bfin_internal_mask_irq(IRQ_MAC_ERROR);
#endif
	/* Drop any already-latched status for this source. */
	bfin_mac_status_ack_irq(irq);
}
364 | 364 | ||
/* Unmask one MAC status sub-interrupt (mirror of the mask path above:
 * PHYIE on BF537 family, else unmask IRQ_MAC_ERROR on first enable).
 */
static void bfin_mac_status_unmask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

#ifdef BF537_FAMILY
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
		break;
	default:
		break;
	}
#else
	/* First sub-interrupt being enabled: unmask the shared parent line. */
	if (!mac_stat_int_mask)
		bfin_internal_unmask_irq(IRQ_MAC_ERROR);
#endif
	mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
}
383 | 383 | ||
#ifdef CONFIG_PM
/* Route wake configuration to whichever parent irq carries MAC status
 * on this part (IRQ_GENERIC_ERROR on BF537 family, IRQ_MAC_ERROR elsewhere).
 */
int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
{
#ifdef BF537_FAMILY
	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
#else
	return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
#endif
}
#else
# define bfin_mac_status_set_wake NULL
#endif
396 | 396 | ||
/* irq_chip for the demuxed MAC status sub-interrupts. */
static struct irq_chip bfin_mac_status_irqchip = {
	.name = "MACST",
	.irq_ack = bfin_ack_noop,
	.irq_mask_ack = bfin_mac_status_mask_irq,
	.irq_mask = bfin_mac_status_mask_irq,
	.irq_unmask = bfin_mac_status_unmask_irq,
	.irq_set_wake = bfin_mac_status_set_wake,
};
405 | 405 | ||
/* Demux the shared MAC error interrupt: find the first pending status bit
 * in EMAC_SYSTAT, dispatch it if it is unmasked, otherwise just ack it.
 * Complains loudly if the parent fired with no source bit set.
 */
void bfin_demux_mac_status_irq(unsigned int int_err_irq,
			       struct irq_desc *inta_desc)
{
	int i, irq = 0;
	u32 status = bfin_read_EMAC_SYSTAT();

	/* Only the first pending source is handled per invocation. */
	for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
		if (status & (1L << i)) {
			irq = IRQ_MAC_PHYINT + i;
			break;
		}

	if (irq) {
		if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
			bfin_handle_irq(irq);
		} else {
			/* Masked source: clear it so it stops asserting. */
			bfin_mac_status_ack_irq(irq);
			pr_debug("IRQ %d:"
				 " MASKED MAC ERROR INTERRUPT ASSERTED\n",
				 irq);
		}
	} else
		printk(KERN_ERR
		       "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND"
		       "(EMAC_SYSTAT=0x%X)\n",
		       __func__, __FILE__, __LINE__, status);
}
434 | #endif | 434 | #endif |
435 | 435 | ||
/* Install a flow handler for a gpio irq.  Under I-pipe everything is
 * forced to level handling regardless of the requested flow.
 */
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
	handle = handle_level_irq;
#endif
	__irq_set_handler_locked(irq, handle);
}
443 | 443 | ||
/* One bit per gpio: set while its interrupt line is enabled/requested. */
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
extern void bfin_gpio_irq_prepare(unsigned gpio);
446 | 446 | ||
447 | #if !defined(CONFIG_BF54x) | 447 | #if !defined(CONFIG_BF54x) |
448 | 448 | ||
449 | static void bfin_gpio_ack_irq(struct irq_data *d) | 449 | static void bfin_gpio_ack_irq(struct irq_data *d) |
450 | { | 450 | { |
451 | /* AFAIK ack_irq in case mask_ack is provided | 451 | /* AFAIK ack_irq in case mask_ack is provided |
452 | * get's only called for edge sense irqs | 452 | * get's only called for edge sense irqs |
453 | */ | 453 | */ |
454 | set_gpio_data(irq_to_gpio(d->irq), 0); | 454 | set_gpio_data(irq_to_gpio(d->irq), 0); |
455 | } | 455 | } |
456 | 456 | ||
457 | static void bfin_gpio_mask_ack_irq(struct irq_data *d) | 457 | static void bfin_gpio_mask_ack_irq(struct irq_data *d) |
458 | { | 458 | { |
459 | unsigned int irq = d->irq; | 459 | unsigned int irq = d->irq; |
460 | u32 gpionr = irq_to_gpio(irq); | 460 | u32 gpionr = irq_to_gpio(irq); |
461 | 461 | ||
462 | if (!irqd_is_level_type(d)) | 462 | if (!irqd_is_level_type(d)) |
463 | set_gpio_data(gpionr, 0); | 463 | set_gpio_data(gpionr, 0); |
464 | 464 | ||
465 | set_gpio_maska(gpionr, 0); | 465 | set_gpio_maska(gpionr, 0); |
466 | } | 466 | } |
467 | 467 | ||
468 | static void bfin_gpio_mask_irq(struct irq_data *d) | 468 | static void bfin_gpio_mask_irq(struct irq_data *d) |
469 | { | 469 | { |
470 | set_gpio_maska(irq_to_gpio(d->irq), 0); | 470 | set_gpio_maska(irq_to_gpio(d->irq), 0); |
471 | } | 471 | } |
472 | 472 | ||
473 | static void bfin_gpio_unmask_irq(struct irq_data *d) | 473 | static void bfin_gpio_unmask_irq(struct irq_data *d) |
474 | { | 474 | { |
475 | set_gpio_maska(irq_to_gpio(d->irq), 1); | 475 | set_gpio_maska(irq_to_gpio(d->irq), 1); |
476 | } | 476 | } |
477 | 477 | ||
/* irq_chip .irq_startup: record the gpio as enabled, prepare it if
 * needed, and unmask the line.  Always returns 0.
 */
static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
{
	u32 gpionr = irq_to_gpio(d->irq);

	/* NOTE(review): __test_and_set_bit returns the *previous* bit value,
	 * so this prepares the gpio only when it was already marked enabled.
	 * Looks inverted at first glance — confirm against
	 * bfin_gpio_irq_prepare() semantics and the matching pattern in
	 * bfin_gpio_irq_type() before changing.
	 */
	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);

	bfin_gpio_unmask_irq(d);

	return 0;
}
489 | 489 | ||
490 | static void bfin_gpio_irq_shutdown(struct irq_data *d) | 490 | static void bfin_gpio_irq_shutdown(struct irq_data *d) |
491 | { | 491 | { |
492 | u32 gpionr = irq_to_gpio(d->irq); | 492 | u32 gpionr = irq_to_gpio(d->irq); |
493 | 493 | ||
494 | bfin_gpio_mask_irq(d); | 494 | bfin_gpio_mask_irq(d); |
495 | __clear_bit(gpionr, gpio_enabled); | 495 | __clear_bit(gpionr, gpio_enabled); |
496 | bfin_gpio_irq_free(gpionr); | 496 | bfin_gpio_irq_free(gpionr); |
497 | } | 497 | } |
498 | 498 | ||
/* irq_chip .irq_set_type: configure a gpio line's trigger mode.
 *
 * Requests the gpio from the gpio layer, then programs direction,
 * both-edge, polarity, and edge/level registers to match 'type', and
 * finally installs the matching flow handler.  A type with no trigger
 * bits releases the enabled flag and succeeds as a no-op.
 * Returns 0 on success or the error from bfin_gpio_irq_request().
 */
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = d->irq;
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {

		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		/* NOTE(review): prepares only when the bit was already set
		 * (same pattern as bfin_gpio_irq_startup) — confirm intent.
		 */
		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);

	} else {
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	/* Disable input and force the pin to input direction while
	 * reprogramming the trigger configuration.
	 */
	set_gpio_inen(gpionr, 0);
	set_gpio_dir(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_gpio_both(gpionr, 1);
	else
		set_gpio_both(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
	else
		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		set_gpio_edge(gpionr, 1);
		set_gpio_inen(gpionr, 1);
		/* Clear any stale latched edge before re-enabling input. */
		set_gpio_data(gpionr, 0);

	} else {
		set_gpio_edge(gpionr, 0);
		set_gpio_inen(gpionr, 1);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		bfin_set_irq_handler(irq, handle_edge_irq);
	else
		bfin_set_irq_handler(irq, handle_level_irq);

	return 0;
}
560 | 560 | ||
#ifdef CONFIG_PM
/* Enable/disable wakeup capability for a GPIO interrupt (PM path). */
static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
        return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
}
#else
/* No PM support: leave the irq_chip callback slot empty. */
# define bfin_gpio_set_wake NULL
#endif
569 | 569 | ||
/*
 * Demux all pending GPIO interrupts of a single port (bank).
 *
 * @irq: the virtual IRQ of bit 0 of the port (e.g. IRQ_PF0)
 *
 * Walks the port's latched data ANDed with its mask-A register and
 * dispatches one bfin_handle_irq() per pending bit.  Scanning stops
 * at this port; it never runs into the following port's IRQ range.
 */
static void bfin_demux_gpio_block(unsigned int irq)
{
        unsigned int gpio, mask;

        gpio = irq_to_gpio(irq);
        /* pending = latched pin state & enabled (mask A) interrupts */
        mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);

        while (mask) {
                if (mask & 1)
                        bfin_handle_irq(irq);
                irq++;
                mask >>= 1;
        }
}
584 | |||
/*
 * Chained handler: map a system-level port interrupt to the GPIO bank(s)
 * it covers and demux each bank.
 *
 * On BF537 the single IRQ_PROG_INTA line is shared by port F and port G
 * mask-A interrupts, so both banks are demuxed explicitly here -- port F
 * inline, then port G via the common path.  This deliberately does NOT
 * continue into port H (that was the over-demuxing this code fixes);
 * port H is only reached through IRQ_MAC_RX when the MAC driver is not
 * claiming that line.
 */
static void bfin_demux_gpio_irq(unsigned int inta_irq,
                                struct irq_desc *desc)
{
        unsigned int irq;

        switch (inta_irq) {
#if defined(BF537_FAMILY)
        case IRQ_PROG_INTA:
                /* INTA covers both port F and port G on bf537 */
                bfin_demux_gpio_block(IRQ_PF0);
                irq = IRQ_PG0;
                break;
# if !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
        case IRQ_MAC_RX:
                /* MAC RX line doubles as the port H interrupt when the
                 * on-chip MAC driver is not built */
                irq = IRQ_PH0;
                break;
# endif
#elif defined(BF533_FAMILY)
        case IRQ_PROG_INTA:
                irq = IRQ_PF0;
                break;
#elif defined(BF538_FAMILY)
        case IRQ_PORTF_INTA:
                irq = IRQ_PF0;
                break;
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
        case IRQ_PORTF_INTA:
                irq = IRQ_PF0;
                break;
        case IRQ_PORTG_INTA:
                irq = IRQ_PG0;
                break;
        case IRQ_PORTH_INTA:
                irq = IRQ_PH0;
                break;
#elif defined(CONFIG_BF561)
        case IRQ_PROG0_INTA:
                irq = IRQ_PF0;
                break;
        case IRQ_PROG1_INTA:
                irq = IRQ_PF16;
                break;
        case IRQ_PROG2_INTA:
                irq = IRQ_PF32;
                break;
#endif
        default:
                /* should never be chained to an unknown source */
                BUG();
                return;
        }

        bfin_demux_gpio_block(irq);
}
642 | 637 | ||
#else /* CONFIG_BF54x */

/* BF54x uses the PINT pin-interrupt controller instead of per-port
 * mask registers: 4 PINT blocks of 32 bits each, assignable to the
 * GPIO ports via the PINTx_ASSIGN registers. */
#define NR_PINT_SYS_IRQS        4
#define NR_PINT_BITS            32
#define NR_PINTS                160
#define IRQ_NOT_AVAIL           0xFF

/* split a flat pint value into its bank number / bit position */
#define PINT_2_BANK(x)          ((x) >> 5)
#define PINT_2_BIT(x)           ((x) & 0x1F)
#define PINT_BIT(x)             (1 << (PINT_2_BIT(x)))

/* translation tables between virtual GPIO IRQs and PINT bank/bit
 * positions; rebuilt by init_pint_lut() whenever PINTx_ASSIGN changes */
static unsigned char irq2pint_lut[NR_PINTS];
static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];

/* memory layout of one PINT block's MMRs, starting at PINTx_MASK_SET */
struct pin_int_t {
        unsigned int mask_set;
        unsigned int mask_clear;
        unsigned int request;
        unsigned int assign;
        unsigned int edge_set;
        unsigned int edge_clear;
        unsigned int invert_set;
        unsigned int invert_clear;
        unsigned int pinstate;
        unsigned int latch;
};

/* MMR base pointer for each of the four PINT blocks */
static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
        (struct pin_int_t *)PINT0_MASK_SET,
        (struct pin_int_t *)PINT1_MASK_SET,
        (struct pin_int_t *)PINT2_MASK_SET,
        (struct pin_int_t *)PINT3_MASK_SET,
};
676 | 671 | ||
677 | inline unsigned int get_irq_base(u32 bank, u8 bmap) | 672 | inline unsigned int get_irq_base(u32 bank, u8 bmap) |
678 | { | 673 | { |
679 | unsigned int irq_base; | 674 | unsigned int irq_base; |
680 | 675 | ||
681 | if (bank < 2) { /*PA-PB */ | 676 | if (bank < 2) { /*PA-PB */ |
682 | irq_base = IRQ_PA0 + bmap * 16; | 677 | irq_base = IRQ_PA0 + bmap * 16; |
683 | } else { /*PC-PJ */ | 678 | } else { /*PC-PJ */ |
684 | irq_base = IRQ_PC0 + bmap * 16; | 679 | irq_base = IRQ_PC0 + bmap * 16; |
685 | } | 680 | } |
686 | 681 | ||
687 | return irq_base; | 682 | return irq_base; |
688 | } | 683 | } |
689 | 684 | ||
/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
/*
 * Rebuild the bidirectional IRQ <-> PINT position lookup tables from the
 * current PINTx_ASSIGN register contents.
 */
void init_pint_lut(void)
{
        u16 bank, bit, irq_base, bit_pos;
        u32 pint_assign;
        u8 bmap;

        /* default: no PINT position assigned to an IRQ */
        memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));

        for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {

                pint_assign = pint[bank]->assign;

                for (bit = 0; bit < NR_PINT_BITS; bit++) {

                        /* each byte of ASSIGN maps one 8-bit group */
                        bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;

                        irq_base = get_irq_base(bank, bmap);

                        /* odd byte groups land on the upper pin half */
                        irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
                        bit_pos = bit + bank * NR_PINT_BITS;

                        pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
                        irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
                }
        }
}
717 | 712 | ||
/*
 * Acknowledge a PINT GPIO interrupt.  For both-edge emulated lines the
 * input inversion is flipped so the opposite edge fires next; then the
 * latched request bit is cleared (write-1-to-clear).
 */
static void bfin_gpio_ack_irq(struct irq_data *d)
{
        u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
        u32 pintbit = PINT_BIT(pint_val);
        u32 bank = PINT_2_BANK(pint_val);

        if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
                /* toggle polarity to catch the other edge */
                if (pint[bank]->invert_set & pintbit)
                        pint[bank]->invert_clear = pintbit;
                else
                        pint[bank]->invert_set = pintbit;
        }
        pint[bank]->request = pintbit;

}
733 | 728 | ||
/*
 * Acknowledge and mask a PINT GPIO interrupt in one step.  Same
 * both-edge polarity flip as bfin_gpio_ack_irq(), then clear the
 * request bit and disable the line via mask_clear.
 */
static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
        u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
        u32 pintbit = PINT_BIT(pint_val);
        u32 bank = PINT_2_BANK(pint_val);

        if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
                /* toggle polarity to catch the other edge */
                if (pint[bank]->invert_set & pintbit)
                        pint[bank]->invert_clear = pintbit;
                else
                        pint[bank]->invert_set = pintbit;
        }

        pint[bank]->request = pintbit;
        pint[bank]->mask_clear = pintbit;
}
750 | 745 | ||
751 | static void bfin_gpio_mask_irq(struct irq_data *d) | 746 | static void bfin_gpio_mask_irq(struct irq_data *d) |
752 | { | 747 | { |
753 | u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; | 748 | u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; |
754 | 749 | ||
755 | pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val); | 750 | pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val); |
756 | } | 751 | } |
757 | 752 | ||
758 | static void bfin_gpio_unmask_irq(struct irq_data *d) | 753 | static void bfin_gpio_unmask_irq(struct irq_data *d) |
759 | { | 754 | { |
760 | u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; | 755 | u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; |
761 | u32 pintbit = PINT_BIT(pint_val); | 756 | u32 pintbit = PINT_BIT(pint_val); |
762 | u32 bank = PINT_2_BANK(pint_val); | 757 | u32 bank = PINT_2_BANK(pint_val); |
763 | 758 | ||
764 | pint[bank]->mask_set = pintbit; | 759 | pint[bank]->mask_set = pintbit; |
765 | } | 760 | } |
766 | 761 | ||
767 | static unsigned int bfin_gpio_irq_startup(struct irq_data *d) | 762 | static unsigned int bfin_gpio_irq_startup(struct irq_data *d) |
768 | { | 763 | { |
769 | unsigned int irq = d->irq; | 764 | unsigned int irq = d->irq; |
770 | u32 gpionr = irq_to_gpio(irq); | 765 | u32 gpionr = irq_to_gpio(irq); |
771 | u32 pint_val = irq2pint_lut[irq - SYS_IRQS]; | 766 | u32 pint_val = irq2pint_lut[irq - SYS_IRQS]; |
772 | 767 | ||
773 | if (pint_val == IRQ_NOT_AVAIL) { | 768 | if (pint_val == IRQ_NOT_AVAIL) { |
774 | printk(KERN_ERR | 769 | printk(KERN_ERR |
775 | "GPIO IRQ %d :Not in PINT Assign table " | 770 | "GPIO IRQ %d :Not in PINT Assign table " |
776 | "Reconfigure Interrupt to Port Assignemt\n", irq); | 771 | "Reconfigure Interrupt to Port Assignemt\n", irq); |
777 | return -ENODEV; | 772 | return -ENODEV; |
778 | } | 773 | } |
779 | 774 | ||
780 | if (__test_and_set_bit(gpionr, gpio_enabled)) | 775 | if (__test_and_set_bit(gpionr, gpio_enabled)) |
781 | bfin_gpio_irq_prepare(gpionr); | 776 | bfin_gpio_irq_prepare(gpionr); |
782 | 777 | ||
783 | bfin_gpio_unmask_irq(d); | 778 | bfin_gpio_unmask_irq(d); |
784 | 779 | ||
785 | return 0; | 780 | return 0; |
786 | } | 781 | } |
787 | 782 | ||
/* Shut down a PINT GPIO interrupt: mask it, drop the enabled bit,
 * and release the pin reservation. */
static void bfin_gpio_irq_shutdown(struct irq_data *d)
{
        u32 gpionr = irq_to_gpio(d->irq);

        bfin_gpio_mask_irq(d);
        __clear_bit(gpionr, gpio_enabled);
        bfin_gpio_irq_free(gpionr);
}
796 | 791 | ||
/*
 * Configure the trigger type of a BF54x PINT GPIO interrupt.
 *
 * @d:    irq_data of the GPIO interrupt line
 * @type: IRQ_TYPE_* flags requested by the caller
 *
 * Returns 0 on success, -ENODEV if the pin has no PINT assignment,
 * or the error from bfin_gpio_irq_request().
 */
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = d->irq;
        int ret;
        char buf[16];
        u32 gpionr = irq_to_gpio(irq);
        u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
        u32 pintbit = PINT_BIT(pint_val);
        u32 bank = PINT_2_BANK(pint_val);

        if (pint_val == IRQ_NOT_AVAIL)
                return -ENODEV;

        if (type == IRQ_TYPE_PROBE) {
                /* only probe unenabled GPIO interrupt lines */
                if (test_bit(gpionr, gpio_enabled))
                        return 0;
                /* probe as both-edge triggered */
                type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
        }

        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
                    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {

                /* reserve the pin for interrupt use before touching it */
                snprintf(buf, 16, "gpio-irq%d", irq);
                ret = bfin_gpio_irq_request(gpionr, buf);
                if (ret)
                        return ret;

                /* first enable on this pin: prepare the GPIO hardware */
                if (__test_and_set_bit(gpionr, gpio_enabled))
                        bfin_gpio_irq_prepare(gpionr);

        } else {
                /* IRQ_TYPE_NONE: just record the pin as disabled */
                __clear_bit(gpionr, gpio_enabled);
                return 0;
        }

        if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
                pint[bank]->invert_set = pintbit;       /* low or falling edge denoted by one */
        else
                pint[bank]->invert_clear = pintbit;     /* high or rising edge denoted by zero */

        if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
            == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
                /* both-edge is emulated: start with the inversion that
                 * matches the current pin level, flipped in the ack */
                if (gpio_get_value(gpionr))
                        pint[bank]->invert_set = pintbit;
                else
                        pint[bank]->invert_clear = pintbit;
        }

        if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
                pint[bank]->edge_set = pintbit;
                bfin_set_irq_handler(irq, handle_edge_irq);
        } else {
                pint[bank]->edge_clear = pintbit;
                bfin_set_irq_handler(irq, handle_level_irq);
        }

        return 0;
}
856 | 851 | ||
#ifdef CONFIG_PM
/*
 * Enable/disable wakeup capability for a BF54x PINT GPIO interrupt by
 * forwarding to the system IRQ of the pin's PINT bank.
 */
static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
        u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
        u32 pint_irq;

        /* map the PINT bank back to its system interrupt */
        switch (PINT_2_BANK(pint_val)) {
        case 0:
                pint_irq = IRQ_PINT0;
                break;
        case 1:
                pint_irq = IRQ_PINT1;
                break;
        case 2:
                pint_irq = IRQ_PINT2;
                break;
        case 3:
                pint_irq = IRQ_PINT3;
                break;
        default:
                return -EINVAL;
        }

        bfin_internal_set_wake(pint_irq, state);

        return 0;
}
#else
/* No PM support: leave the irq_chip callback slot empty. */
# define bfin_gpio_set_wake NULL
#endif
888 | 883 | ||
/*
 * Chained handler for the BF54x PINT system interrupts: walk the bank's
 * latched request register and dispatch each pending bit through the
 * pint2irq lookup table.
 */
static void bfin_demux_gpio_irq(unsigned int inta_irq,
                                struct irq_desc *desc)
{
        u32 bank, pint_val;
        u32 request, irq;

        switch (inta_irq) {
        case IRQ_PINT0:
                bank = 0;
                break;
        case IRQ_PINT2:
                bank = 2;
                break;
        case IRQ_PINT3:
                bank = 3;
                break;
        case IRQ_PINT1:
                bank = 1;
                break;
        default:
                return;
        }

        /* flat index of this bank's bit 0 in the lookup tables */
        pint_val = bank * NR_PINT_BITS;

        request = pint[bank]->request;

        while (request) {
                if (request & 1) {
                        irq = pint2irq_lut[pint_val] + SYS_IRQS;
                        bfin_handle_irq(irq);
                }
                pint_val++;
                request >>= 1;
        }

}
#endif
927 | 922 | ||
/* irq_chip shared by all GPIO interrupt lines (both GPIO and PINT
 * variants of the callbacks above, selected at compile time). */
static struct irq_chip bfin_gpio_irqchip = {
        .name = "GPIO",
        .irq_ack = bfin_gpio_ack_irq,
        .irq_mask = bfin_gpio_mask_irq,
        .irq_mask_ack = bfin_gpio_mask_ack_irq,
        .irq_unmask = bfin_gpio_unmask_irq,
        .irq_disable = bfin_gpio_mask_irq,
        .irq_enable = bfin_gpio_unmask_irq,
        .irq_set_type = bfin_gpio_irq_type,
        .irq_startup = bfin_gpio_irq_startup,
        .irq_shutdown = bfin_gpio_irq_shutdown,
        .irq_set_wake = bfin_gpio_set_wake,
};
941 | 936 | ||
/*
 * Program the per-core event vector table (EVT2-EVT15) with the
 * assembly entry points, then CSYNC so the writes take effect before
 * any event can be raised.
 */
void __cpuinit init_exception_vectors(void)
{
        /* cannot program in software:
         * evt0 - emulation (jtag)
         * evt1 - reset
         */
        bfin_write_EVT2(evt_nmi);
        bfin_write_EVT3(trap);
        bfin_write_EVT5(evt_ivhw);
        bfin_write_EVT6(evt_timer);
        bfin_write_EVT7(evt_evt7);
        bfin_write_EVT8(evt_evt8);
        bfin_write_EVT9(evt_evt9);
        bfin_write_EVT10(evt_evt10);
        bfin_write_EVT11(evt_evt11);
        bfin_write_EVT12(evt_evt12);
        bfin_write_EVT13(evt_evt13);
        bfin_write_EVT14(evt_evt14);
        bfin_write_EVT15(evt_system_call);
        CSYNC();
}
963 | 958 | ||
964 | /* | 959 | /* |
965 | * This function should be called during kernel startup to initialize | 960 | * This function should be called during kernel startup to initialize |
966 | * the BFin IRQ handling routines. | 961 | * the BFin IRQ handling routines. |
967 | */ | 962 | */ |
968 | 963 | ||
969 | int __init init_arch_irq(void) | 964 | int __init init_arch_irq(void) |
970 | { | 965 | { |
971 | int irq; | 966 | int irq; |
972 | unsigned long ilat = 0; | 967 | unsigned long ilat = 0; |
973 | 968 | ||
974 | /* Disable all the peripheral intrs - page 4-29 HW Ref manual */ | 969 | /* Disable all the peripheral intrs - page 4-29 HW Ref manual */ |
975 | #ifdef SIC_IMASK0 | 970 | #ifdef SIC_IMASK0 |
976 | bfin_write_SIC_IMASK0(SIC_UNMASK_ALL); | 971 | bfin_write_SIC_IMASK0(SIC_UNMASK_ALL); |
977 | bfin_write_SIC_IMASK1(SIC_UNMASK_ALL); | 972 | bfin_write_SIC_IMASK1(SIC_UNMASK_ALL); |
978 | # ifdef SIC_IMASK2 | 973 | # ifdef SIC_IMASK2 |
979 | bfin_write_SIC_IMASK2(SIC_UNMASK_ALL); | 974 | bfin_write_SIC_IMASK2(SIC_UNMASK_ALL); |
980 | # endif | 975 | # endif |
981 | # ifdef CONFIG_SMP | 976 | # ifdef CONFIG_SMP |
982 | bfin_write_SICB_IMASK0(SIC_UNMASK_ALL); | 977 | bfin_write_SICB_IMASK0(SIC_UNMASK_ALL); |
983 | bfin_write_SICB_IMASK1(SIC_UNMASK_ALL); | 978 | bfin_write_SICB_IMASK1(SIC_UNMASK_ALL); |
984 | # endif | 979 | # endif |
985 | #else | 980 | #else |
986 | bfin_write_SIC_IMASK(SIC_UNMASK_ALL); | 981 | bfin_write_SIC_IMASK(SIC_UNMASK_ALL); |
987 | #endif | 982 | #endif |
988 | 983 | ||
989 | local_irq_disable(); | 984 | local_irq_disable(); |
990 | 985 | ||
991 | #ifdef CONFIG_BF54x | 986 | #ifdef CONFIG_BF54x |
992 | # ifdef CONFIG_PINTx_REASSIGN | 987 | # ifdef CONFIG_PINTx_REASSIGN |
993 | pint[0]->assign = CONFIG_PINT0_ASSIGN; | 988 | pint[0]->assign = CONFIG_PINT0_ASSIGN; |
994 | pint[1]->assign = CONFIG_PINT1_ASSIGN; | 989 | pint[1]->assign = CONFIG_PINT1_ASSIGN; |
995 | pint[2]->assign = CONFIG_PINT2_ASSIGN; | 990 | pint[2]->assign = CONFIG_PINT2_ASSIGN; |
996 | pint[3]->assign = CONFIG_PINT3_ASSIGN; | 991 | pint[3]->assign = CONFIG_PINT3_ASSIGN; |
997 | # endif | 992 | # endif |
998 | /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */ | 993 | /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */ |
999 | init_pint_lut(); | 994 | init_pint_lut(); |
1000 | #endif | 995 | #endif |
1001 | 996 | ||
1002 | for (irq = 0; irq <= SYS_IRQS; irq++) { | 997 | for (irq = 0; irq <= SYS_IRQS; irq++) { |
1003 | if (irq <= IRQ_CORETMR) | 998 | if (irq <= IRQ_CORETMR) |
1004 | irq_set_chip(irq, &bfin_core_irqchip); | 999 | irq_set_chip(irq, &bfin_core_irqchip); |
1005 | else | 1000 | else |
1006 | irq_set_chip(irq, &bfin_internal_irqchip); | 1001 | irq_set_chip(irq, &bfin_internal_irqchip); |
1007 | 1002 | ||
1008 | switch (irq) { | 1003 | switch (irq) { |
1009 | #if defined(CONFIG_BF53x) | 1004 | #if defined(CONFIG_BF53x) |
1010 | case IRQ_PROG_INTA: | 1005 | case IRQ_PROG_INTA: |
1011 | # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) | 1006 | # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) |
1012 | case IRQ_MAC_RX: | 1007 | case IRQ_MAC_RX: |
1013 | # endif | 1008 | # endif |
1014 | #elif defined(CONFIG_BF54x) | 1009 | #elif defined(CONFIG_BF54x) |
1015 | case IRQ_PINT0: | 1010 | case IRQ_PINT0: |
1016 | case IRQ_PINT1: | 1011 | case IRQ_PINT1: |
1017 | case IRQ_PINT2: | 1012 | case IRQ_PINT2: |
1018 | case IRQ_PINT3: | 1013 | case IRQ_PINT3: |
1019 | #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x) | 1014 | #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x) |
1020 | case IRQ_PORTF_INTA: | 1015 | case IRQ_PORTF_INTA: |
1021 | case IRQ_PORTG_INTA: | 1016 | case IRQ_PORTG_INTA: |
1022 | case IRQ_PORTH_INTA: | 1017 | case IRQ_PORTH_INTA: |
1023 | #elif defined(CONFIG_BF561) | 1018 | #elif defined(CONFIG_BF561) |
1024 | case IRQ_PROG0_INTA: | 1019 | case IRQ_PROG0_INTA: |
1025 | case IRQ_PROG1_INTA: | 1020 | case IRQ_PROG1_INTA: |
1026 | case IRQ_PROG2_INTA: | 1021 | case IRQ_PROG2_INTA: |
1027 | #elif defined(BF538_FAMILY) | 1022 | #elif defined(BF538_FAMILY) |
1028 | case IRQ_PORTF_INTA: | 1023 | case IRQ_PORTF_INTA: |
1029 | #endif | 1024 | #endif |
1030 | irq_set_chained_handler(irq, bfin_demux_gpio_irq); | 1025 | irq_set_chained_handler(irq, bfin_demux_gpio_irq); |
1031 | break; | 1026 | break; |
1032 | #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) | 1027 | #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) |
1033 | case IRQ_MAC_ERROR: | 1028 | case IRQ_MAC_ERROR: |
1034 | irq_set_chained_handler(irq, | 1029 | irq_set_chained_handler(irq, |
1035 | bfin_demux_mac_status_irq); | 1030 | bfin_demux_mac_status_irq); |
1036 | break; | 1031 | break; |
1037 | #endif | 1032 | #endif |
1038 | #ifdef CONFIG_SMP | 1033 | #ifdef CONFIG_SMP |
1039 | case IRQ_SUPPLE_0: | 1034 | case IRQ_SUPPLE_0: |
1040 | case IRQ_SUPPLE_1: | 1035 | case IRQ_SUPPLE_1: |
1041 | irq_set_handler(irq, handle_percpu_irq); | 1036 | irq_set_handler(irq, handle_percpu_irq); |
1042 | break; | 1037 | break; |
1043 | #endif | 1038 | #endif |
1044 | 1039 | ||
1045 | #ifdef CONFIG_TICKSOURCE_CORETMR | 1040 | #ifdef CONFIG_TICKSOURCE_CORETMR |
1046 | case IRQ_CORETMR: | 1041 | case IRQ_CORETMR: |
1047 | # ifdef CONFIG_SMP | 1042 | # ifdef CONFIG_SMP |
1048 | irq_set_handler(irq, handle_percpu_irq); | 1043 | irq_set_handler(irq, handle_percpu_irq); |
1049 | # else | 1044 | # else |
1050 | irq_set_handler(irq, handle_simple_irq); | 1045 | irq_set_handler(irq, handle_simple_irq); |
1051 | # endif | 1046 | # endif |
1052 | break; | 1047 | break; |
1053 | #endif | 1048 | #endif |
1054 | 1049 | ||
1055 | #ifdef CONFIG_TICKSOURCE_GPTMR0 | 1050 | #ifdef CONFIG_TICKSOURCE_GPTMR0 |
1056 | case IRQ_TIMER0: | 1051 | case IRQ_TIMER0: |
1057 | irq_set_handler(irq, handle_simple_irq); | 1052 | irq_set_handler(irq, handle_simple_irq); |
1058 | break; | 1053 | break; |
1059 | #endif | 1054 | #endif |
1060 | 1055 | ||
1061 | default: | 1056 | default: |
1062 | #ifdef CONFIG_IPIPE | 1057 | #ifdef CONFIG_IPIPE |
1063 | irq_set_handler(irq, handle_level_irq); | 1058 | irq_set_handler(irq, handle_level_irq); |
1064 | #else | 1059 | #else |
1065 | irq_set_handler(irq, handle_simple_irq); | 1060 | irq_set_handler(irq, handle_simple_irq); |
1066 | #endif | 1061 | #endif |
1067 | break; | 1062 | break; |
1068 | } | 1063 | } |
1069 | } | 1064 | } |
1070 | 1065 | ||
1071 | init_mach_irq(); | 1066 | init_mach_irq(); |
1072 | 1067 | ||
1073 | #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) | 1068 | #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) |
1074 | for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++) | 1069 | for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++) |
1075 | irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip, | 1070 | irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip, |
1076 | handle_level_irq); | 1071 | handle_level_irq); |
1077 | #endif | 1072 | #endif |
1078 | /* if configured as edge, then will be changed to do_edge_IRQ */ | 1073 | /* if configured as edge, then will be changed to do_edge_IRQ */ |
1079 | for (irq = GPIO_IRQ_BASE; | 1074 | for (irq = GPIO_IRQ_BASE; |
1080 | irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++) | 1075 | irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++) |
1081 | irq_set_chip_and_handler(irq, &bfin_gpio_irqchip, | 1076 | irq_set_chip_and_handler(irq, &bfin_gpio_irqchip, |
1082 | handle_level_irq); | 1077 | handle_level_irq); |
1083 | 1078 | ||
1084 | bfin_write_IMASK(0); | 1079 | bfin_write_IMASK(0); |
1085 | CSYNC(); | 1080 | CSYNC(); |
1086 | ilat = bfin_read_ILAT(); | 1081 | ilat = bfin_read_ILAT(); |
1087 | CSYNC(); | 1082 | CSYNC(); |
1088 | bfin_write_ILAT(ilat); | 1083 | bfin_write_ILAT(ilat); |
1089 | CSYNC(); | 1084 | CSYNC(); |
1090 | 1085 | ||
1091 | printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n"); | 1086 | printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n"); |
1092 | /* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx, | 1087 | /* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx, |
1093 | * local_irq_enable() | 1088 | * local_irq_enable() |
1094 | */ | 1089 | */ |
1095 | program_IAR(); | 1090 | program_IAR(); |
1096 | /* Therefore it's better to setup IARs before interrupts enabled */ | 1091 | /* Therefore it's better to setup IARs before interrupts enabled */ |
1097 | search_IAR(); | 1092 | search_IAR(); |
1098 | 1093 | ||
1099 | /* Enable interrupts IVG7-15 */ | 1094 | /* Enable interrupts IVG7-15 */ |
1100 | bfin_irq_flags |= IMASK_IVG15 | | 1095 | bfin_irq_flags |= IMASK_IVG15 | |
1101 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | | 1096 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | |
1102 | IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; | 1097 | IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; |
1103 | 1098 | ||
1104 | /* This implicitly covers ANOMALY_05000171 | 1099 | /* This implicitly covers ANOMALY_05000171 |
1105 | * Boot-ROM code modifies SICA_IWRx wakeup registers | 1100 | * Boot-ROM code modifies SICA_IWRx wakeup registers |
1106 | */ | 1101 | */ |
1107 | #ifdef SIC_IWR0 | 1102 | #ifdef SIC_IWR0 |
1108 | bfin_write_SIC_IWR0(IWR_DISABLE_ALL); | 1103 | bfin_write_SIC_IWR0(IWR_DISABLE_ALL); |
1109 | # ifdef SIC_IWR1 | 1104 | # ifdef SIC_IWR1 |
1110 | /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which | 1105 | /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which |
1111 | * will screw up the bootrom as it relies on MDMA0/1 waking it | 1106 | * will screw up the bootrom as it relies on MDMA0/1 waking it |
1112 | * up from IDLE instructions. See this report for more info: | 1107 | * up from IDLE instructions. See this report for more info: |
1113 | * http://blackfin.uclinux.org/gf/tracker/4323 | 1108 | * http://blackfin.uclinux.org/gf/tracker/4323 |
1114 | */ | 1109 | */ |
1115 | if (ANOMALY_05000435) | 1110 | if (ANOMALY_05000435) |
1116 | bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11)); | 1111 | bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11)); |
1117 | else | 1112 | else |
1118 | bfin_write_SIC_IWR1(IWR_DISABLE_ALL); | 1113 | bfin_write_SIC_IWR1(IWR_DISABLE_ALL); |
1119 | # endif | 1114 | # endif |
1120 | # ifdef SIC_IWR2 | 1115 | # ifdef SIC_IWR2 |
1121 | bfin_write_SIC_IWR2(IWR_DISABLE_ALL); | 1116 | bfin_write_SIC_IWR2(IWR_DISABLE_ALL); |
1122 | # endif | 1117 | # endif |
1123 | #else | 1118 | #else |
1124 | bfin_write_SIC_IWR(IWR_DISABLE_ALL); | 1119 | bfin_write_SIC_IWR(IWR_DISABLE_ALL); |
1125 | #endif | 1120 | #endif |
1126 | 1121 | ||
1127 | return 0; | 1122 | return 0; |
1128 | } | 1123 | } |
1129 | 1124 | ||
1130 | #ifdef CONFIG_DO_IRQ_L1 | 1125 | #ifdef CONFIG_DO_IRQ_L1 |
1131 | __attribute__((l1_text)) | 1126 | __attribute__((l1_text)) |
1132 | #endif | 1127 | #endif |
1133 | static int vec_to_irq(int vec) | 1128 | static int vec_to_irq(int vec) |
1134 | { | 1129 | { |
1135 | struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst; | 1130 | struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst; |
1136 | struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop; | 1131 | struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop; |
1137 | unsigned long sic_status[3]; | 1132 | unsigned long sic_status[3]; |
1138 | 1133 | ||
1139 | if (likely(vec == EVT_IVTMR_P)) | 1134 | if (likely(vec == EVT_IVTMR_P)) |
1140 | return IRQ_CORETMR; | 1135 | return IRQ_CORETMR; |
1141 | 1136 | ||
1142 | #ifdef SIC_ISR | 1137 | #ifdef SIC_ISR |
1143 | sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR(); | 1138 | sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR(); |
1144 | #else | 1139 | #else |
1145 | if (smp_processor_id()) { | 1140 | if (smp_processor_id()) { |
1146 | # ifdef SICB_ISR0 | 1141 | # ifdef SICB_ISR0 |
1147 | /* This will be optimized out in UP mode. */ | 1142 | /* This will be optimized out in UP mode. */ |
1148 | sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0(); | 1143 | sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0(); |
1149 | sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1(); | 1144 | sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1(); |
1150 | # endif | 1145 | # endif |
1151 | } else { | 1146 | } else { |
1152 | sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0(); | 1147 | sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0(); |
1153 | sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1(); | 1148 | sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1(); |
1154 | } | 1149 | } |
1155 | #endif | 1150 | #endif |
1156 | #ifdef SIC_ISR2 | 1151 | #ifdef SIC_ISR2 |
1157 | sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2(); | 1152 | sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2(); |
1158 | #endif | 1153 | #endif |
1159 | 1154 | ||
1160 | for (;; ivg++) { | 1155 | for (;; ivg++) { |
1161 | if (ivg >= ivg_stop) | 1156 | if (ivg >= ivg_stop) |
1162 | return -1; | 1157 | return -1; |
1163 | #ifdef SIC_ISR | 1158 | #ifdef SIC_ISR |
1164 | if (sic_status[0] & ivg->isrflag) | 1159 | if (sic_status[0] & ivg->isrflag) |
1165 | #else | 1160 | #else |
1166 | if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag) | 1161 | if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag) |
1167 | #endif | 1162 | #endif |
1168 | return ivg->irqno; | 1163 | return ivg->irqno; |
1169 | } | 1164 | } |
1170 | } | 1165 | } |
1171 | 1166 | ||
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * Top-level C interrupt entry: translate the core event vector to a
 * Linux irq number and dispatch it; spurious vectors (no pending
 * peripheral source) are silently dropped.
 */
void do_irq(int vec, struct pt_regs *fp)
{
	int irq = vec_to_irq(vec);

	/* -1 means nothing pending for this vector -- ignore. */
	if (irq == -1)
		return;
	asm_do_IRQ(irq, fp);
}
1182 | 1177 | ||
1183 | #ifdef CONFIG_IPIPE | 1178 | #ifdef CONFIG_IPIPE |
1184 | 1179 | ||
1185 | int __ipipe_get_irq_priority(unsigned irq) | 1180 | int __ipipe_get_irq_priority(unsigned irq) |
1186 | { | 1181 | { |
1187 | int ient, prio; | 1182 | int ient, prio; |
1188 | 1183 | ||
1189 | if (irq <= IRQ_CORETMR) | 1184 | if (irq <= IRQ_CORETMR) |
1190 | return irq; | 1185 | return irq; |
1191 | 1186 | ||
1192 | for (ient = 0; ient < NR_PERI_INTS; ient++) { | 1187 | for (ient = 0; ient < NR_PERI_INTS; ient++) { |
1193 | struct ivgx *ivg = ivg_table + ient; | 1188 | struct ivgx *ivg = ivg_table + ient; |
1194 | if (ivg->irqno == irq) { | 1189 | if (ivg->irqno == irq) { |
1195 | for (prio = 0; prio <= IVG13-IVG7; prio++) { | 1190 | for (prio = 0; prio <= IVG13-IVG7; prio++) { |
1196 | if (ivg7_13[prio].ifirst <= ivg && | 1191 | if (ivg7_13[prio].ifirst <= ivg && |
1197 | ivg7_13[prio].istop > ivg) | 1192 | ivg7_13[prio].istop > ivg) |
1198 | return IVG7 + prio; | 1193 | return IVG7 + prio; |
1199 | } | 1194 | } |
1200 | } | 1195 | } |
1201 | } | 1196 | } |
1202 | 1197 | ||
1203 | return IVG15; | 1198 | return IVG15; |
1204 | } | 1199 | } |
1205 | 1200 | ||
1206 | /* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */ | 1201 | /* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */ |
1207 | #ifdef CONFIG_DO_IRQ_L1 | 1202 | #ifdef CONFIG_DO_IRQ_L1 |
1208 | __attribute__((l1_text)) | 1203 | __attribute__((l1_text)) |
1209 | #endif | 1204 | #endif |
1210 | asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs) | 1205 | asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs) |
1211 | { | 1206 | { |
1212 | struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr(); | 1207 | struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr(); |
1213 | struct ipipe_domain *this_domain = __ipipe_current_domain; | 1208 | struct ipipe_domain *this_domain = __ipipe_current_domain; |
1214 | struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop; | 1209 | struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop; |
1215 | struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst; | 1210 | struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst; |
1216 | int irq, s = 0; | 1211 | int irq, s = 0; |
1217 | 1212 | ||
1218 | irq = vec_to_irq(vec); | 1213 | irq = vec_to_irq(vec); |
1219 | if (irq == -1) | 1214 | if (irq == -1) |
1220 | return 0; | 1215 | return 0; |
1221 | 1216 | ||
1222 | if (irq == IRQ_SYSTMR) { | 1217 | if (irq == IRQ_SYSTMR) { |
1223 | #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0) | 1218 | #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0) |
1224 | bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */ | 1219 | bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */ |
1225 | #endif | 1220 | #endif |
1226 | /* This is basically what we need from the register frame. */ | 1221 | /* This is basically what we need from the register frame. */ |
1227 | __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend; | 1222 | __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend; |
1228 | __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc; | 1223 | __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc; |
1229 | if (this_domain != ipipe_root_domain) | 1224 | if (this_domain != ipipe_root_domain) |
1230 | __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10; | 1225 | __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10; |
1231 | else | 1226 | else |
1232 | __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10; | 1227 | __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10; |
1233 | } | 1228 | } |
1234 | 1229 | ||
1235 | /* | 1230 | /* |
1236 | * We don't want Linux interrupt handlers to run at the | 1231 | * We don't want Linux interrupt handlers to run at the |
1237 | * current core priority level (i.e. < EVT15), since this | 1232 | * current core priority level (i.e. < EVT15), since this |
1238 | * might delay other interrupts handled by a high priority | 1233 | * might delay other interrupts handled by a high priority |
1239 | * domain. Here is what we do instead: | 1234 | * domain. Here is what we do instead: |
1240 | * | 1235 | * |
1241 | * - we raise the SYNCDEFER bit to prevent | 1236 | * - we raise the SYNCDEFER bit to prevent |
1242 | * __ipipe_handle_irq() to sync the pipeline for the root | 1237 | * __ipipe_handle_irq() to sync the pipeline for the root |
1243 | * stage for the incoming interrupt. Upon return, that IRQ is | 1238 | * stage for the incoming interrupt. Upon return, that IRQ is |
1244 | * pending in the interrupt log. | 1239 | * pending in the interrupt log. |
1245 | * | 1240 | * |
1246 | * - we raise the TIF_IRQ_SYNC bit for the current thread, so | 1241 | * - we raise the TIF_IRQ_SYNC bit for the current thread, so |
1247 | * that _schedule_and_signal_from_int will eventually sync the | 1242 | * that _schedule_and_signal_from_int will eventually sync the |
1248 | * pipeline from EVT15. | 1243 | * pipeline from EVT15. |
1249 | */ | 1244 | */ |
1250 | if (this_domain == ipipe_root_domain) { | 1245 | if (this_domain == ipipe_root_domain) { |
1251 | s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status); | 1246 | s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status); |
1252 | barrier(); | 1247 | barrier(); |
1253 | } | 1248 | } |
1254 | 1249 | ||
1255 | ipipe_trace_irq_entry(irq); | 1250 | ipipe_trace_irq_entry(irq); |
1256 | __ipipe_handle_irq(irq, regs); | 1251 | __ipipe_handle_irq(irq, regs); |
1257 | ipipe_trace_irq_exit(irq); | 1252 | ipipe_trace_irq_exit(irq); |
1258 | 1253 | ||
1259 | if (user_mode(regs) && | 1254 | if (user_mode(regs) && |
1260 | !ipipe_test_foreign_stack() && | 1255 | !ipipe_test_foreign_stack() && |
1261 | (current->ipipe_flags & PF_EVTRET) != 0) { | 1256 | (current->ipipe_flags & PF_EVTRET) != 0) { |
1262 | /* | 1257 | /* |
1263 | * Testing for user_regs() does NOT fully eliminate | 1258 | * Testing for user_regs() does NOT fully eliminate |
1264 | * foreign stack contexts, because of the forged | 1259 | * foreign stack contexts, because of the forged |
1265 | * interrupt returns we do through | 1260 | * interrupt returns we do through |
1266 | * __ipipe_call_irqtail. In that case, we might have | 1261 | * __ipipe_call_irqtail. In that case, we might have |
1267 | * preempted a foreign stack context in a high | 1262 | * preempted a foreign stack context in a high |
1268 | * priority domain, with a single interrupt level now | 1263 | * priority domain, with a single interrupt level now |
1269 | * pending after the irqtail unwinding is done. In | 1264 | * pending after the irqtail unwinding is done. In |
1270 | * which case user_mode() is now true, and the event | 1265 | * which case user_mode() is now true, and the event |