Commit 01f8fa4f01d8362358eb90e412bd7ae18a3ec1ad
genirq: Allow forcing cpu affinity of interrupts
The current implementation of irq_set_affinity() rightfully refuses to route an interrupt to an offline cpu. But there is a special case, where this is actually desired. Some of the ARM SoCs have per cpu timers which require setting the affinity during cpu startup where the cpu is not yet in the online mask. If we can't do that, then the local timer interrupt for the about to become online cpu is routed to some random online cpu. The developers of the affected machines tried to work around that issue, but that results in a massive mess in that timer code. We have a yet unused argument in the set_affinity callbacks of the irq chips, which I added back then for a similar reason. It was never required, so it went unused. But I'm happy that I never removed it. That allows us to implement a sane handling of the above scenario. So the affected SoC drivers can add the required force handling to their interrupt chip, switch the timer code to irq_force_affinity() and things just work. This does not affect any existing user of irq_set_affinity(). Tagged for stable to allow a simple fix of the affected SoC clock event drivers. Reported-and-tested-by: Krzysztof Kozlowski <k.kozlowski@samsung.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Kyungmin Park <kyungmin.park@samsung.com> Cc: Marek Szyprowski <m.szyprowski@samsung.com> Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> Cc: Tomasz Figa <t.figa@samsung.com>, Cc: Daniel Lezcano <daniel.lezcano@linaro.org>, Cc: Kukjin Kim <kgene.kim@samsung.com> Cc: linux-arm-kernel@lists.infradead.org, Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20140416143315.717251504@linutronix.de Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Showing 4 changed files with 43 additions and 14 deletions Inline Diff
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2004-2012 Cavium, Inc. | 6 | * Copyright (C) 2004-2012 Cavium, Inc. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/irqdomain.h> | 10 | #include <linux/irqdomain.h> |
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/irq.h> | 14 | #include <linux/irq.h> |
15 | #include <linux/smp.h> | 15 | #include <linux/smp.h> |
16 | #include <linux/of.h> | 16 | #include <linux/of.h> |
17 | 17 | ||
18 | #include <asm/octeon/octeon.h> | 18 | #include <asm/octeon/octeon.h> |
19 | #include <asm/octeon/cvmx-ciu2-defs.h> | 19 | #include <asm/octeon/cvmx-ciu2-defs.h> |
20 | 20 | ||
21 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); | 21 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); |
22 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); | 22 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); |
23 | static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); | 23 | static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); |
24 | 24 | ||
25 | static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; | 25 | static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; |
26 | 26 | ||
/*
 * Per-irq CIU routing info, packed so the whole union fits in the
 * void * stored as the irq chip data (see octeon_irq_set_ciu_mapping).
 */
union octeon_ciu_chip_data {
	void *p;		/* raw pointer form stored in irq_data */
	unsigned long l;	/* integer form, used for zeroing/packing */
	struct {
		unsigned long line:6;		/* CIU line number */
		unsigned long bit:6;		/* bit within the 64-bit line */
		unsigned long gpio_line:6;	/* associated GPIO line, if any */
	} s;
};
36 | 36 | ||
/*
 * State for one MIPS core interrupt line.  The enable state is cached
 * (desired_en) and applied lazily from the irq bus sync_unlock path,
 * since programming it requires an SMP cross-call (may sleep).
 */
struct octeon_core_chip_data {
	struct mutex core_irq_mutex;	/* serializes enable-state updates */
	bool current_en;		/* state currently programmed in hw */
	bool desired_en;		/* state requested by enable/disable */
	u8 bit;				/* Status/Cause IP bit index */
};
43 | 43 | ||
44 | #define MIPS_CORE_IRQ_LINES 8 | 44 | #define MIPS_CORE_IRQ_LINES 8 |
45 | 45 | ||
46 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; | 46 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; |
47 | 47 | ||
/*
 * Bind @irq to CIU (@line, @bit): install the chip and flow handler,
 * encode the line/bit/gpio_line triple as the irq chip data, and
 * record the reverse (line,bit) -> irq mapping used at dispatch time.
 */
static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				       struct irq_chip *chip,
				       irq_flow_handler_t handler)
{
	union octeon_ciu_chip_data cd;

	irq_set_chip_and_handler(irq, chip, handler);

	cd.l = 0;
	cd.s.line = line;
	cd.s.bit = bit;
	cd.s.gpio_line = gpio_line;

	/* The packed union fits in a pointer, so it is stored by value. */
	irq_set_chip_data(irq, cd.p);
	octeon_irq_ciu_to_irq[line][bit] = irq;
}
64 | 64 | ||
/*
 * Force a fixed virq <-> hwirq association in @domain.  The hwirq is
 * encoded as (line << 6 | bit), matching the 6-bit field packing used
 * elsewhere in this file.
 */
static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					 int irq, int line, int bit)
{
	irq_domain_associate(domain, irq, line << 6 | bit);
}
70 | 70 | ||
/* Map a Linux cpu number to the Octeon hardware core id. */
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	/* UP: only the current core exists, @cpu is ignored. */
	return cvmx_get_core_num();
#endif
}
79 | 79 | ||
/* Map an Octeon hardware core id back to a Linux cpu number. */
static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	/* UP: only the current cpu exists, @coreid is ignored. */
	return smp_processor_id();
#endif
}
88 | 88 | ||
/*
 * Ack a core interrupt: mask its IP bit in the CP0 Status register;
 * for the two software interrupts the Cause bit must also be cleared
 * by hand (hardware lines clear themselves at the source).
 */
static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}
104 | 104 | ||
/*
 * EOI a core interrupt: re-set its IP enable bit in the CP0 Status
 * register so the line can fire again.  Also used as the cpu_online
 * callback of the chip.
 */
static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}
116 | 116 | ||
117 | static void octeon_irq_core_set_enable_local(void *arg) | 117 | static void octeon_irq_core_set_enable_local(void *arg) |
118 | { | 118 | { |
119 | struct irq_data *data = arg; | 119 | struct irq_data *data = arg; |
120 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 120 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
121 | unsigned int mask = 0x100 << cd->bit; | 121 | unsigned int mask = 0x100 << cd->bit; |
122 | 122 | ||
123 | /* | 123 | /* |
124 | * Interrupts are already disabled, so these are atomic. | 124 | * Interrupts are already disabled, so these are atomic. |
125 | */ | 125 | */ |
126 | if (cd->desired_en) | 126 | if (cd->desired_en) |
127 | set_c0_status(mask); | 127 | set_c0_status(mask); |
128 | else | 128 | else |
129 | clear_c0_status(mask); | 129 | clear_c0_status(mask); |
130 | 130 | ||
131 | } | 131 | } |
132 | 132 | ||
133 | static void octeon_irq_core_disable(struct irq_data *data) | 133 | static void octeon_irq_core_disable(struct irq_data *data) |
134 | { | 134 | { |
135 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 135 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
136 | cd->desired_en = false; | 136 | cd->desired_en = false; |
137 | } | 137 | } |
138 | 138 | ||
139 | static void octeon_irq_core_enable(struct irq_data *data) | 139 | static void octeon_irq_core_enable(struct irq_data *data) |
140 | { | 140 | { |
141 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 141 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
142 | cd->desired_en = true; | 142 | cd->desired_en = true; |
143 | } | 143 | } |
144 | 144 | ||
145 | static void octeon_irq_core_bus_lock(struct irq_data *data) | 145 | static void octeon_irq_core_bus_lock(struct irq_data *data) |
146 | { | 146 | { |
147 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 147 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
148 | 148 | ||
149 | mutex_lock(&cd->core_irq_mutex); | 149 | mutex_lock(&cd->core_irq_mutex); |
150 | } | 150 | } |
151 | 151 | ||
/*
 * Slow-path completion of enable/disable: if the cached desired state
 * differs from what is programmed, broadcast the update to every cpu
 * (synchronously) before dropping the bus mutex.
 */
static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		/* Runs octeon_irq_core_set_enable_local on each cpu, waits. */
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}
164 | 164 | ||
/*
 * irq_chip for the 8 MIPS core interrupt lines.  Enable/disable only
 * cache the requested state; the hardware is updated from the
 * bus_lock/bus_sync_unlock pair, which may sleep (mutex + cross-call).
 */
static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	/* Unmask on cpu online, mask on cpu offline. */
	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
178 | 178 | ||
/*
 * Boot-time setup: initialize the per-line state and install the core
 * irq_chip with a percpu flow handler on OCTEON_IRQ_SW0 .. +7.
 */
static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}
198 | 198 | ||
/*
 * Pick the cpu that should receive the next occurrence of this irq.
 * With more than one cpu in the affinity mask, round-robin starting
 * after the current cpu, wrapping to the start of the mask via
 * cpumask_next(-1, ...), and accepting only online cpus.
 * NOTE(review): the loop never terminates if the affinity mask
 * contains no online cpu - callers appear to guarantee at least one.
 */
static int next_cpu_for_irq(struct irq_data *data)
{

#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);

	if (weight > 1) {
		cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				/* Ran past the end: wrap to the first cpu. */
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		/* Empty affinity mask: fall back to the local cpu. */
		cpu = smp_processor_id();
	}
	return cpu;
#else
	return smp_processor_id();
#endif
}
227 | 227 | ||
/*
 * Enable the irq in the CIU enable register of the cpu chosen by the
 * affinity round-robin.  The per-cpu software mirror is updated under
 * that cpu's lock, made visible with wmb(), then written to the CSR.
 */
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}
261 | 261 | ||
/*
 * Enable the irq in the CIU enable register of the current cpu only.
 * Same mirror-then-wmb-then-CSR sequence as octeon_irq_ciu_enable,
 * but using this cpu's mirrors and lock.
 */
static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}
293 | 293 | ||
/*
 * Disable the irq in the CIU enable register of the current cpu only:
 * clear the bit in the per-cpu mirror and write the mirror back.
 */
static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		__clear_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * updating the enable register.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		__clear_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * updating the enable register.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}
325 | 325 | ||
/*
 * Disable the irq on every online cpu, taking each cpu's mirror lock
 * in turn and writing back that cpu's updated mirror.
 */
static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd.s.line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * updating the enable register.
		 */
		wmb();
		if (cd.s.line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}
358 | 358 | ||
/*
 * Enable the irq on every online cpu, taking each cpu's mirror lock
 * in turn and writing back that cpu's updated mirror.
 */
static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd.s.line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd.s.line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}
391 | 391 | ||
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.  The write-1-to-set register makes
 * the CSR update atomic, so no per-cpu spinlock is needed here -
 * only the mirror bit is set (atomically) alongside it.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd.s.line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
419 | 419 | ||
/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.  Uses the atomic write-1-to-set
 * CSR, so only the local mirror bit needs updating alongside it.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
442 | 442 | ||
/*
 * Disable the irq on the current CPU for chips that have the
 * EN*_W1{S,C} registers, via the atomic write-1-to-clear CSR.
 */
static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}
461 | 461 | ||
/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 * Line 0 uses the per-core SUM0 register; line 1 uses the single
 * CVMX_CIU_INT_SUM1 register (no per-core index).
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}
480 | 480 | ||
/*
 * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
 * registers.  Each online cpu's mirror bit is cleared atomically and
 * the per-core write-1-to-clear CSR is written.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}
508 | 508 | ||
509 | /* | 509 | /* |
510 | * Enable the irq on the all cores for chips that have the EN*_W1{S,C} | 510 | * Enable the irq on the all cores for chips that have the EN*_W1{S,C} |
511 | * registers. | 511 | * registers. |
512 | */ | 512 | */ |
513 | static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) | 513 | static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) |
514 | { | 514 | { |
515 | int cpu; | 515 | int cpu; |
516 | u64 mask; | 516 | u64 mask; |
517 | union octeon_ciu_chip_data cd; | 517 | union octeon_ciu_chip_data cd; |
518 | 518 | ||
519 | cd.p = irq_data_get_irq_chip_data(data); | 519 | cd.p = irq_data_get_irq_chip_data(data); |
520 | mask = 1ull << (cd.s.bit); | 520 | mask = 1ull << (cd.s.bit); |
521 | 521 | ||
522 | if (cd.s.line == 0) { | 522 | if (cd.s.line == 0) { |
523 | for_each_online_cpu(cpu) { | 523 | for_each_online_cpu(cpu) { |
524 | int index = octeon_coreid_for_cpu(cpu) * 2; | 524 | int index = octeon_coreid_for_cpu(cpu) * 2; |
525 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | 525 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); |
526 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 526 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
527 | } | 527 | } |
528 | } else { | 528 | } else { |
529 | for_each_online_cpu(cpu) { | 529 | for_each_online_cpu(cpu) { |
530 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 530 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
531 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 531 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
532 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 532 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
533 | } | 533 | } |
534 | } | 534 | } |
535 | } | 535 | } |
536 | 536 | ||
/*
 * Program the GPIO_BIT_CFG register for this irq's GPIO line, derived
 * from the trigger type currently stored in the irq data.
 */
static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	union octeon_ciu_chip_data cd;
	u32 t = irqd_get_trigger_type(data);

	cd.p = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	/* int_type is set for edge triggers, clear for level triggers. */
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	/* rx_xor set for active-low / falling-edge — presumably inverts the pin input. */
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 nS glitch filter*/
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);
}
556 | 556 | ||
/*
 * Enable a GPIO-sourced CIU irq on W1{S,C}-register chips: the pin's
 * GPIO_BIT_CFG is programmed before the CIU enable bit is set.
 */
static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}
562 | 562 | ||
/*
 * Enable a GPIO-sourced CIU irq (non-v2 chips): the pin's GPIO_BIT_CFG
 * is programmed before the CIU enable bit is set.
 */
static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}
568 | 568 | ||
/*
 * irq_set_type callback for GPIO irqs: record the new trigger type,
 * then reprogram the pin's GPIO_BIT_CFG register to match it.
 */
static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	/* Core code keeps the irq masked around this (IRQCHIP_SET_TYPE_MASKED). */
	return IRQ_SET_MASK_OK;
}
576 | 576 | ||
/*
 * Disable a GPIO-sourced CIU irq on W1{S,C}-register chips: clear the
 * pin's GPIO_BIT_CFG, then disable the CIU bit on all cores.
 */
static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}
586 | 586 | ||
/*
 * Disable a GPIO-sourced CIU irq (non-v2 chips): clear the pin's
 * GPIO_BIT_CFG, then disable the CIU bit on all cores.
 */
static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}
596 | 596 | ||
597 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) | 597 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) |
598 | { | 598 | { |
599 | union octeon_ciu_chip_data cd; | 599 | union octeon_ciu_chip_data cd; |
600 | u64 mask; | 600 | u64 mask; |
601 | 601 | ||
602 | cd.p = irq_data_get_irq_chip_data(data); | 602 | cd.p = irq_data_get_irq_chip_data(data); |
603 | mask = 1ull << (cd.s.gpio_line); | 603 | mask = 1ull << (cd.s.gpio_line); |
604 | 604 | ||
605 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); | 605 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); |
606 | } | 606 | } |
607 | 607 | ||
608 | static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) | 608 | static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) |
609 | { | 609 | { |
610 | if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) | 610 | if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) |
611 | handle_edge_irq(irq, desc); | 611 | handle_edge_irq(irq, desc); |
612 | else | 612 | else |
613 | handle_level_irq(irq, desc); | 613 | handle_level_irq(irq, desc); |
614 | } | 614 | } |
615 | 615 | ||
616 | #ifdef CONFIG_SMP | 616 | #ifdef CONFIG_SMP |
617 | 617 | ||
/*
 * irq_cpu_offline callback: migrate this irq away from the CPU that is
 * going down.  Runs on the outgoing CPU itself (smp_processor_id()).
 */
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	/* Nothing to do unless the irq is affine to this CPU. */
	if (!cpumask_test_cpu(cpu, data->affinity))
		return;

	if (cpumask_weight(data->affinity) > 1) {
		/*
		 * It has multi CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	/* force == false: only online CPUs are acceptable targets here. */
	irq_set_affinity_locked(data, &new_affinity, false);
}
640 | 640 | ||
641 | static int octeon_irq_ciu_set_affinity(struct irq_data *data, | 641 | static int octeon_irq_ciu_set_affinity(struct irq_data *data, |
642 | const struct cpumask *dest, bool force) | 642 | const struct cpumask *dest, bool force) |
643 | { | 643 | { |
644 | int cpu; | 644 | int cpu; |
645 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 645 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
646 | unsigned long flags; | 646 | unsigned long flags; |
647 | union octeon_ciu_chip_data cd; | 647 | union octeon_ciu_chip_data cd; |
648 | unsigned long *pen; | 648 | unsigned long *pen; |
649 | raw_spinlock_t *lock; | 649 | raw_spinlock_t *lock; |
650 | 650 | ||
651 | cd.p = irq_data_get_irq_chip_data(data); | 651 | cd.p = irq_data_get_irq_chip_data(data); |
652 | 652 | ||
653 | /* | 653 | /* |
654 | * For non-v2 CIU, we will allow only single CPU affinity. | 654 | * For non-v2 CIU, we will allow only single CPU affinity. |
655 | * This removes the need to do locking in the .ack/.eoi | 655 | * This removes the need to do locking in the .ack/.eoi |
656 | * functions. | 656 | * functions. |
657 | */ | 657 | */ |
658 | if (cpumask_weight(dest) != 1) | 658 | if (cpumask_weight(dest) != 1) |
659 | return -EINVAL; | 659 | return -EINVAL; |
660 | 660 | ||
661 | if (!enable_one) | 661 | if (!enable_one) |
662 | return 0; | 662 | return 0; |
663 | 663 | ||
664 | 664 | ||
665 | for_each_online_cpu(cpu) { | 665 | for_each_online_cpu(cpu) { |
666 | int coreid = octeon_coreid_for_cpu(cpu); | 666 | int coreid = octeon_coreid_for_cpu(cpu); |
667 | 667 | ||
668 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 668 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
669 | raw_spin_lock_irqsave(lock, flags); | 669 | raw_spin_lock_irqsave(lock, flags); |
670 | 670 | ||
671 | if (cd.s.line == 0) | 671 | if (cd.s.line == 0) |
672 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 672 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
673 | else | 673 | else |
674 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 674 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
675 | 675 | ||
676 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 676 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
677 | enable_one = 0; | 677 | enable_one = 0; |
678 | __set_bit(cd.s.bit, pen); | 678 | __set_bit(cd.s.bit, pen); |
679 | } else { | 679 | } else { |
680 | __clear_bit(cd.s.bit, pen); | 680 | __clear_bit(cd.s.bit, pen); |
681 | } | 681 | } |
682 | /* | 682 | /* |
683 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 683 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
684 | * enabling the irq. | 684 | * enabling the irq. |
685 | */ | 685 | */ |
686 | wmb(); | 686 | wmb(); |
687 | 687 | ||
688 | if (cd.s.line == 0) | 688 | if (cd.s.line == 0) |
689 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 689 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
690 | else | 690 | else |
691 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 691 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
692 | 692 | ||
693 | raw_spin_unlock_irqrestore(lock, flags); | 693 | raw_spin_unlock_irqrestore(lock, flags); |
694 | } | 694 | } |
695 | return 0; | 695 | return 0; |
696 | } | 696 | } |
697 | 697 | ||
698 | /* | 698 | /* |
699 | * Set affinity for the irq for chips that have the EN*_W1{S,C} | 699 | * Set affinity for the irq for chips that have the EN*_W1{S,C} |
700 | * registers. | 700 | * registers. |
701 | */ | 701 | */ |
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	/* Only reprogram hardware if the irq is currently live. */
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	union octeon_ciu_chip_data cd;

	/* Irq disabled or masked: affinity is recorded by core code only. */
	if (!enable_one)
		return 0;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd.s.bit;

	/*
	 * Enable the bit (W1S) on the first CPU found in @dest and
	 * disable it (W1C) on every other online CPU; the mirror bitmap
	 * is kept in sync for the dispatch fast path.
	 */
	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}
746 | #endif | 746 | #endif |
747 | 747 | ||
748 | /* | 748 | /* |
749 | * Newer octeon chips have support for lockless CIU operation. | 749 | * Newer octeon chips have support for lockless CIU operation. |
750 | */ | 750 | */ |
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,		/* W1S, lockless */
	.irq_disable = octeon_irq_ciu_disable_all_v2,	/* W1C on all cores */
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
763 | 763 | ||
/* CIU irq_chip for older chips without W1{S,C} registers (spinlocked paths). */
static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,	/* single-CPU only */
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
776 | 776 | ||
777 | /* The mbox versions don't do any affinity or round-robin. */ | 777 | /* The mbox versions don't do any affinity or round-robin. */ |
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,	/* mask locally while handling */
	.irq_eoi = octeon_irq_ciu_enable_local_v2,	/* re-enable locally when done */

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	/* online/offline callbacks run even while the irq is enabled */
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
789 | 789 | ||
/* Mailbox irq_chip for chips without W1{S,C} registers. */
static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,	/* mask locally while handling */
	.irq_eoi = octeon_irq_ciu_enable_local,		/* re-enable locally when done */

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	/* online/offline callbacks run even while the irq is enabled */
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
801 | 801 | ||
/* GPIO irq_chip for chips with W1{S,C} registers. */
static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,	/* programs pin cfg too */
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,	/* clears pin cfg too */
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
#endif
	/* core code masks the irq around set_type */
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
815 | 815 | ||
/* GPIO irq_chip for chips without W1{S,C} registers. */
static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,	/* programs pin cfg too */
	.irq_disable = octeon_irq_ciu_disable_gpio,	/* clears pin cfg too */
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,	/* single-CPU only */
#endif
	/* core code masks the irq around set_type */
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
829 | 829 | ||
830 | /* | 830 | /* |
831 | * Watchdog interrupts are special. They are associated with a single | 831 | * Watchdog interrupts are special. They are associated with a single |
832 | * core, so we hardwire the affinity to that core. | 832 | * core, so we hardwire the affinity to that core. |
833 | */ | 833 | */ |
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	/* The per-cpu lock serializes updates to that CPU's EN1 mirror. */
	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}
853 | 853 | ||
854 | /* | 854 | /* |
855 | * Watchdog interrupts are special. They are associated with a single | 855 | * Watchdog interrupts are special. They are associated with a single |
856 | * core, so we hardwire the affinity to that core. | 856 | * core, so we hardwire the affinity to that core. |
857 | */ | 857 | */ |
858 | static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data) | 858 | static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data) |
859 | { | 859 | { |
860 | int coreid = data->irq - OCTEON_IRQ_WDOG0; | 860 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
861 | int cpu = octeon_cpu_for_coreid(coreid); | 861 | int cpu = octeon_cpu_for_coreid(coreid); |
862 | 862 | ||
863 | set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 863 | set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
864 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid); | 864 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid); |
865 | } | 865 | } |
866 | 866 | ||
867 | 867 | ||
/* Watchdog irq_chip for chips with W1{S,C} registers; no affinity control. */
static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,	/* hardwired to owning core */
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};
875 | 875 | ||
/* Watchdog irq_chip for chips without W1{S,C} registers; no affinity control. */
static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,		/* hardwired to owning core */
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};
883 | 883 | ||
/*
 * Return true when the given CIU (line, bit) source is edge triggered,
 * false for level triggered sources.
 */
static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	if (line == 0)
		/* 48-49: GMX DRP, 50: IPD_DRP, 52-55: Timers, 58: MPI */
		return (bit >= 48 && bit <= 50) ||
		       (bit >= 52 && bit <= 55) ||
		       bit == 58;

	/* line == 1: only the PTP source (bit 47) is edge triggered. */
	return bit == 47;
}
909 | 909 | ||
/* Private data attached to the GPIO irq domain. */
struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;	/* presumably the hwirq of GPIO pin 0 — confirm against the gpio map code */
};
913 | 913 | ||
914 | static int octeon_irq_gpio_xlat(struct irq_domain *d, | 914 | static int octeon_irq_gpio_xlat(struct irq_domain *d, |
915 | struct device_node *node, | 915 | struct device_node *node, |
916 | const u32 *intspec, | 916 | const u32 *intspec, |
917 | unsigned int intsize, | 917 | unsigned int intsize, |
918 | unsigned long *out_hwirq, | 918 | unsigned long *out_hwirq, |
919 | unsigned int *out_type) | 919 | unsigned int *out_type) |
920 | { | 920 | { |
921 | unsigned int type; | 921 | unsigned int type; |
922 | unsigned int pin; | 922 | unsigned int pin; |
923 | unsigned int trigger; | 923 | unsigned int trigger; |
924 | 924 | ||
925 | if (d->of_node != node) | 925 | if (d->of_node != node) |
926 | return -EINVAL; | 926 | return -EINVAL; |
927 | 927 | ||
928 | if (intsize < 2) | 928 | if (intsize < 2) |
929 | return -EINVAL; | 929 | return -EINVAL; |
930 | 930 | ||
931 | pin = intspec[0]; | 931 | pin = intspec[0]; |
932 | if (pin >= 16) | 932 | if (pin >= 16) |
933 | return -EINVAL; | 933 | return -EINVAL; |
934 | 934 | ||
935 | trigger = intspec[1]; | 935 | trigger = intspec[1]; |
936 | 936 | ||
937 | switch (trigger) { | 937 | switch (trigger) { |
938 | case 1: | 938 | case 1: |
939 | type = IRQ_TYPE_EDGE_RISING; | 939 | type = IRQ_TYPE_EDGE_RISING; |
940 | break; | 940 | break; |
941 | case 2: | 941 | case 2: |
942 | type = IRQ_TYPE_EDGE_FALLING; | 942 | type = IRQ_TYPE_EDGE_FALLING; |
943 | break; | 943 | break; |
944 | case 4: | 944 | case 4: |
945 | type = IRQ_TYPE_LEVEL_HIGH; | 945 | type = IRQ_TYPE_LEVEL_HIGH; |
946 | break; | 946 | break; |
947 | case 8: | 947 | case 8: |
948 | type = IRQ_TYPE_LEVEL_LOW; | 948 | type = IRQ_TYPE_LEVEL_LOW; |
949 | break; | 949 | break; |
950 | default: | 950 | default: |
951 | pr_err("Error: (%s) Invalid irq trigger specification: %x\n", | 951 | pr_err("Error: (%s) Invalid irq trigger specification: %x\n", |
952 | node->name, | 952 | node->name, |
953 | trigger); | 953 | trigger); |
954 | type = IRQ_TYPE_LEVEL_LOW; | 954 | type = IRQ_TYPE_LEVEL_LOW; |
955 | break; | 955 | break; |
956 | } | 956 | } |
957 | *out_type = type; | 957 | *out_type = type; |
958 | *out_hwirq = pin; | 958 | *out_hwirq = pin; |
959 | 959 | ||
960 | return 0; | 960 | return 0; |
961 | } | 961 | } |
962 | 962 | ||
963 | static int octeon_irq_ciu_xlat(struct irq_domain *d, | 963 | static int octeon_irq_ciu_xlat(struct irq_domain *d, |
964 | struct device_node *node, | 964 | struct device_node *node, |
965 | const u32 *intspec, | 965 | const u32 *intspec, |
966 | unsigned int intsize, | 966 | unsigned int intsize, |
967 | unsigned long *out_hwirq, | 967 | unsigned long *out_hwirq, |
968 | unsigned int *out_type) | 968 | unsigned int *out_type) |
969 | { | 969 | { |
970 | unsigned int ciu, bit; | 970 | unsigned int ciu, bit; |
971 | 971 | ||
972 | ciu = intspec[0]; | 972 | ciu = intspec[0]; |
973 | bit = intspec[1]; | 973 | bit = intspec[1]; |
974 | 974 | ||
975 | if (ciu > 1 || bit > 63) | 975 | if (ciu > 1 || bit > 63) |
976 | return -EINVAL; | 976 | return -EINVAL; |
977 | 977 | ||
978 | *out_hwirq = (ciu << 6) | bit; | 978 | *out_hwirq = (ciu << 6) | bit; |
979 | *out_type = 0; | 979 | *out_type = 0; |
980 | 980 | ||
981 | return 0; | 981 | return 0; |
982 | } | 982 | } |
983 | 983 | ||
/*
 * Chip implementations in use on this SoC (v2 or non-v2 variants);
 * octeon_irq_ciu_chip is consumed by octeon_irq_ciu_map() below — the
 * code that assigns them is presumably in init code outside this chunk.
 */
static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_gpio_chip;
986 | 986 | ||
987 | static bool octeon_irq_virq_in_range(unsigned int virq) | 987 | static bool octeon_irq_virq_in_range(unsigned int virq) |
988 | { | 988 | { |
989 | /* We cannot let it overflow the mapping array. */ | 989 | /* We cannot let it overflow the mapping array. */ |
990 | if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0]))) | 990 | if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0]))) |
991 | return true; | 991 | return true; |
992 | 992 | ||
993 | WARN_ONCE(true, "virq out of range %u.\n", virq); | 993 | WARN_ONCE(true, "virq out of range %u.\n", virq); |
994 | return false; | 994 | return false; |
995 | } | 995 | } |
996 | 996 | ||
997 | static int octeon_irq_ciu_map(struct irq_domain *d, | 997 | static int octeon_irq_ciu_map(struct irq_domain *d, |
998 | unsigned int virq, irq_hw_number_t hw) | 998 | unsigned int virq, irq_hw_number_t hw) |
999 | { | 999 | { |
1000 | unsigned int line = hw >> 6; | 1000 | unsigned int line = hw >> 6; |
1001 | unsigned int bit = hw & 63; | 1001 | unsigned int bit = hw & 63; |
1002 | 1002 | ||
1003 | if (!octeon_irq_virq_in_range(virq)) | 1003 | if (!octeon_irq_virq_in_range(virq)) |
1004 | return -EINVAL; | 1004 | return -EINVAL; |
1005 | 1005 | ||
1006 | /* Don't map irq if it is reserved for GPIO. */ | 1006 | /* Don't map irq if it is reserved for GPIO. */ |
1007 | if (line == 0 && bit >= 16 && bit <32) | 1007 | if (line == 0 && bit >= 16 && bit <32) |
1008 | return 0; | 1008 | return 0; |
1009 | 1009 | ||
1010 | if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) | 1010 | if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) |
1011 | return -EINVAL; | 1011 | return -EINVAL; |
1012 | 1012 | ||
1013 | if (octeon_irq_ciu_is_edge(line, bit)) | 1013 | if (octeon_irq_ciu_is_edge(line, bit)) |
1014 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1014 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1015 | octeon_irq_ciu_chip, | 1015 | octeon_irq_ciu_chip, |
1016 | handle_edge_irq); | 1016 | handle_edge_irq); |
1017 | else | 1017 | else |
1018 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1018 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1019 | octeon_irq_ciu_chip, | 1019 | octeon_irq_ciu_chip, |
1020 | handle_level_irq); | 1020 | handle_level_irq); |
1021 | 1021 | ||
1022 | return 0; | 1022 | return 0; |
1023 | } | 1023 | } |
1024 | 1024 | ||
1025 | static int octeon_irq_gpio_map_common(struct irq_domain *d, | 1025 | static int octeon_irq_gpio_map_common(struct irq_domain *d, |
1026 | unsigned int virq, irq_hw_number_t hw, | 1026 | unsigned int virq, irq_hw_number_t hw, |
1027 | int line_limit, struct irq_chip *chip) | 1027 | int line_limit, struct irq_chip *chip) |
1028 | { | 1028 | { |
1029 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; | 1029 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; |
1030 | unsigned int line, bit; | 1030 | unsigned int line, bit; |
1031 | 1031 | ||
1032 | if (!octeon_irq_virq_in_range(virq)) | 1032 | if (!octeon_irq_virq_in_range(virq)) |
1033 | return -EINVAL; | 1033 | return -EINVAL; |
1034 | 1034 | ||
1035 | line = (hw + gpiod->base_hwirq) >> 6; | 1035 | line = (hw + gpiod->base_hwirq) >> 6; |
1036 | bit = (hw + gpiod->base_hwirq) & 63; | 1036 | bit = (hw + gpiod->base_hwirq) & 63; |
1037 | if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) | 1037 | if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) |
1038 | return -EINVAL; | 1038 | return -EINVAL; |
1039 | 1039 | ||
1040 | octeon_irq_set_ciu_mapping(virq, line, bit, hw, | 1040 | octeon_irq_set_ciu_mapping(virq, line, bit, hw, |
1041 | chip, octeon_irq_handle_gpio); | 1041 | chip, octeon_irq_handle_gpio); |
1042 | return 0; | 1042 | return 0; |
1043 | } | 1043 | } |
1044 | 1044 | ||
/* .map callback for the CIU GPIO domain; lines 0 and 1 are valid. */
static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);
}
1050 | 1050 | ||
/* irq_domain callbacks for interrupts wired directly to the CIU. */
static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.xlate = octeon_irq_ciu_xlat,
};

/* irq_domain callbacks for the GPIO interrupt lines. */
static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.xlate = octeon_irq_gpio_xlat,
};
1060 | 1060 | ||
1061 | static void octeon_irq_ip2_ciu(void) | 1061 | static void octeon_irq_ip2_ciu(void) |
1062 | { | 1062 | { |
1063 | const unsigned long core_id = cvmx_get_core_num(); | 1063 | const unsigned long core_id = cvmx_get_core_num(); |
1064 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); | 1064 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); |
1065 | 1065 | ||
1066 | ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); | 1066 | ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); |
1067 | if (likely(ciu_sum)) { | 1067 | if (likely(ciu_sum)) { |
1068 | int bit = fls64(ciu_sum) - 1; | 1068 | int bit = fls64(ciu_sum) - 1; |
1069 | int irq = octeon_irq_ciu_to_irq[0][bit]; | 1069 | int irq = octeon_irq_ciu_to_irq[0][bit]; |
1070 | if (likely(irq)) | 1070 | if (likely(irq)) |
1071 | do_IRQ(irq); | 1071 | do_IRQ(irq); |
1072 | else | 1072 | else |
1073 | spurious_interrupt(); | 1073 | spurious_interrupt(); |
1074 | } else { | 1074 | } else { |
1075 | spurious_interrupt(); | 1075 | spurious_interrupt(); |
1076 | } | 1076 | } |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | static void octeon_irq_ip3_ciu(void) | 1079 | static void octeon_irq_ip3_ciu(void) |
1080 | { | 1080 | { |
1081 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); | 1081 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); |
1082 | 1082 | ||
1083 | ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); | 1083 | ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); |
1084 | if (likely(ciu_sum)) { | 1084 | if (likely(ciu_sum)) { |
1085 | int bit = fls64(ciu_sum) - 1; | 1085 | int bit = fls64(ciu_sum) - 1; |
1086 | int irq = octeon_irq_ciu_to_irq[1][bit]; | 1086 | int irq = octeon_irq_ciu_to_irq[1][bit]; |
1087 | if (likely(irq)) | 1087 | if (likely(irq)) |
1088 | do_IRQ(irq); | 1088 | do_IRQ(irq); |
1089 | else | 1089 | else |
1090 | spurious_interrupt(); | 1090 | spurious_interrupt(); |
1091 | } else { | 1091 | } else { |
1092 | spurious_interrupt(); | 1092 | spurious_interrupt(); |
1093 | } | 1093 | } |
1094 | } | 1094 | } |
1095 | 1095 | ||
/* Set by octeon_irq_set_ip4_handler() once an IP4 handler is installed. */
static bool octeon_irq_use_ip4;
1097 | 1097 | ||
/* Cross-call helper: unmask the IP4 line on the calling cpu. */
static void octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}
1102 | 1102 | ||
/*
 * Default IP4 handler used when no real one has been registered:
 * mask IP4 on this cpu and account the event as spurious.
 */
static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}
1108 | 1108 | ||
/* Per-IP-level dispatch hooks, selected at init time (CIU vs. CIU2). */
static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

/* Called during SMP bringup to initialize interrupts on a secondary core. */
void (*octeon_irq_setup_secondary)(void);
1114 | 1114 | ||
/*
 * Install @h as the IP4 dispatch function and unmask the IP4 line on
 * every online cpu.
 */
void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}
1121 | 1121 | ||
/* Run the irq_cpu_online() callbacks of all irq chips on this cpu. */
static void octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}
1126 | 1126 | ||
/*
 * Per-cpu CIU setup: reset this cpu's enable mirrors and spinlock, then
 * mask every CIU interrupt for this core in hardware.
 */
static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();


	__get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
	__get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;
	/* Make the cleared mirrors visible before touching the hardware. */
	wmb();
	raw_spin_lock_init(&__get_cpu_var(octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
1147 | 1147 | ||
1148 | static void octeon_irq_init_ciu2_percpu(void) | 1148 | static void octeon_irq_init_ciu2_percpu(void) |
1149 | { | 1149 | { |
1150 | u64 regx, ipx; | 1150 | u64 regx, ipx; |
1151 | int coreid = cvmx_get_core_num(); | 1151 | int coreid = cvmx_get_core_num(); |
1152 | u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid); | 1152 | u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid); |
1153 | 1153 | ||
1154 | /* | 1154 | /* |
1155 | * Disable All CIU2 Interrupts. The ones we need will be | 1155 | * Disable All CIU2 Interrupts. The ones we need will be |
1156 | * enabled later. Read the SUM register so we know the write | 1156 | * enabled later. Read the SUM register so we know the write |
1157 | * completed. | 1157 | * completed. |
1158 | * | 1158 | * |
1159 | * There are 9 registers and 3 IPX levels with strides 0x1000 | 1159 | * There are 9 registers and 3 IPX levels with strides 0x1000 |
1160 | * and 0x200 respectivly. Use loops to clear them. | 1160 | * and 0x200 respectivly. Use loops to clear them. |
1161 | */ | 1161 | */ |
1162 | for (regx = 0; regx <= 0x8000; regx += 0x1000) { | 1162 | for (regx = 0; regx <= 0x8000; regx += 0x1000) { |
1163 | for (ipx = 0; ipx <= 0x400; ipx += 0x200) | 1163 | for (ipx = 0; ipx <= 0x400; ipx += 0x200) |
1164 | cvmx_write_csr(base + regx + ipx, 0); | 1164 | cvmx_write_csr(base + regx + ipx, 0); |
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid)); | 1167 | cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid)); |
1168 | } | 1168 | } |
1169 | 1169 | ||
/* Secondary-cpu bringup for CIU chips: init per-cpu state, run the
 * online callbacks, then unmask IP2/IP3 and mask the unused IP4 line. */
static void octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}
1179 | 1179 | ||
/* Secondary-cpu bringup for CIU2 chips; IP4 is unmasked only when a
 * handler has been registered via octeon_irq_set_ip4_handler(). */
static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}
1192 | 1192 | ||
/*
 * Boot-time setup for SoCs with the original CIU interrupt controller:
 * pick the irq_chip variants for this model, create the GPIO and CIU
 * irq domains from the devicetree, and install the fixed legacy
 * irq-number mappings.
 */
static void __init octeon_irq_init_ciu(void)
{
	unsigned int i;
	struct irq_chip *chip;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct device_node *gpio_node;
	struct device_node *ciu_node;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	/* These models get the _v2 chip variants; the rest the originals. */
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	/* No IP4 handler registered yet; mask IP4 until one arrives. */
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
	if (gpio_node) {
		struct octeon_irq_gpio_domain_data *gpiod;

		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
		if (gpiod) {
			/* gpio domain host_data is the base hwirq number. */
			gpiod->base_hwirq = 16;
			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
			of_node_put(gpio_node);
		} else
			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
	} else
		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
	if (ciu_node) {
		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
		irq_set_default_host(ciu_domain);
		of_node_put(ciu_node);
	} else
		panic("Cannot find device node for cavium,octeon-3860-ciu.");

	/* CIU_0 */
	for (i = 0; i < 16; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);

	/* CIU_1 */
	for (i = 0; i < 16; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}
1279 | 1279 | ||
1280 | /* | 1280 | /* |
1281 | * Watchdog interrupts are special. They are associated with a single | 1281 | * Watchdog interrupts are special. They are associated with a single |
1282 | * core, so we hardwire the affinity to that core. | 1282 | * core, so we hardwire the affinity to that core. |
1283 | */ | 1283 | */ |
1284 | static void octeon_irq_ciu2_wd_enable(struct irq_data *data) | 1284 | static void octeon_irq_ciu2_wd_enable(struct irq_data *data) |
1285 | { | 1285 | { |
1286 | u64 mask; | 1286 | u64 mask; |
1287 | u64 en_addr; | 1287 | u64 en_addr; |
1288 | int coreid = data->irq - OCTEON_IRQ_WDOG0; | 1288 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
1289 | union octeon_ciu_chip_data cd; | 1289 | union octeon_ciu_chip_data cd; |
1290 | 1290 | ||
1291 | cd.p = irq_data_get_irq_chip_data(data); | 1291 | cd.p = irq_data_get_irq_chip_data(data); |
1292 | mask = 1ull << (cd.s.bit); | 1292 | mask = 1ull << (cd.s.bit); |
1293 | 1293 | ||
1294 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1294 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); |
1295 | cvmx_write_csr(en_addr, mask); | 1295 | cvmx_write_csr(en_addr, mask); |
1296 | 1296 | ||
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | static void octeon_irq_ciu2_enable(struct irq_data *data) | 1299 | static void octeon_irq_ciu2_enable(struct irq_data *data) |
1300 | { | 1300 | { |
1301 | u64 mask; | 1301 | u64 mask; |
1302 | u64 en_addr; | 1302 | u64 en_addr; |
1303 | int cpu = next_cpu_for_irq(data); | 1303 | int cpu = next_cpu_for_irq(data); |
1304 | int coreid = octeon_coreid_for_cpu(cpu); | 1304 | int coreid = octeon_coreid_for_cpu(cpu); |
1305 | union octeon_ciu_chip_data cd; | 1305 | union octeon_ciu_chip_data cd; |
1306 | 1306 | ||
1307 | cd.p = irq_data_get_irq_chip_data(data); | 1307 | cd.p = irq_data_get_irq_chip_data(data); |
1308 | mask = 1ull << (cd.s.bit); | 1308 | mask = 1ull << (cd.s.bit); |
1309 | 1309 | ||
1310 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1310 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); |
1311 | cvmx_write_csr(en_addr, mask); | 1311 | cvmx_write_csr(en_addr, mask); |
1312 | } | 1312 | } |
1313 | 1313 | ||
1314 | static void octeon_irq_ciu2_enable_local(struct irq_data *data) | 1314 | static void octeon_irq_ciu2_enable_local(struct irq_data *data) |
1315 | { | 1315 | { |
1316 | u64 mask; | 1316 | u64 mask; |
1317 | u64 en_addr; | 1317 | u64 en_addr; |
1318 | int coreid = cvmx_get_core_num(); | 1318 | int coreid = cvmx_get_core_num(); |
1319 | union octeon_ciu_chip_data cd; | 1319 | union octeon_ciu_chip_data cd; |
1320 | 1320 | ||
1321 | cd.p = irq_data_get_irq_chip_data(data); | 1321 | cd.p = irq_data_get_irq_chip_data(data); |
1322 | mask = 1ull << (cd.s.bit); | 1322 | mask = 1ull << (cd.s.bit); |
1323 | 1323 | ||
1324 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1324 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); |
1325 | cvmx_write_csr(en_addr, mask); | 1325 | cvmx_write_csr(en_addr, mask); |
1326 | 1326 | ||
1327 | } | 1327 | } |
1328 | 1328 | ||
1329 | static void octeon_irq_ciu2_disable_local(struct irq_data *data) | 1329 | static void octeon_irq_ciu2_disable_local(struct irq_data *data) |
1330 | { | 1330 | { |
1331 | u64 mask; | 1331 | u64 mask; |
1332 | u64 en_addr; | 1332 | u64 en_addr; |
1333 | int coreid = cvmx_get_core_num(); | 1333 | int coreid = cvmx_get_core_num(); |
1334 | union octeon_ciu_chip_data cd; | 1334 | union octeon_ciu_chip_data cd; |
1335 | 1335 | ||
1336 | cd.p = irq_data_get_irq_chip_data(data); | 1336 | cd.p = irq_data_get_irq_chip_data(data); |
1337 | mask = 1ull << (cd.s.bit); | 1337 | mask = 1ull << (cd.s.bit); |
1338 | 1338 | ||
1339 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); | 1339 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); |
1340 | cvmx_write_csr(en_addr, mask); | 1340 | cvmx_write_csr(en_addr, mask); |
1341 | 1341 | ||
1342 | } | 1342 | } |
1343 | 1343 | ||
1344 | static void octeon_irq_ciu2_ack(struct irq_data *data) | 1344 | static void octeon_irq_ciu2_ack(struct irq_data *data) |
1345 | { | 1345 | { |
1346 | u64 mask; | 1346 | u64 mask; |
1347 | u64 en_addr; | 1347 | u64 en_addr; |
1348 | int coreid = cvmx_get_core_num(); | 1348 | int coreid = cvmx_get_core_num(); |
1349 | union octeon_ciu_chip_data cd; | 1349 | union octeon_ciu_chip_data cd; |
1350 | 1350 | ||
1351 | cd.p = irq_data_get_irq_chip_data(data); | 1351 | cd.p = irq_data_get_irq_chip_data(data); |
1352 | mask = 1ull << (cd.s.bit); | 1352 | mask = 1ull << (cd.s.bit); |
1353 | 1353 | ||
1354 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); | 1354 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); |
1355 | cvmx_write_csr(en_addr, mask); | 1355 | cvmx_write_csr(en_addr, mask); |
1356 | 1356 | ||
1357 | } | 1357 | } |
1358 | 1358 | ||
1359 | static void octeon_irq_ciu2_disable_all(struct irq_data *data) | 1359 | static void octeon_irq_ciu2_disable_all(struct irq_data *data) |
1360 | { | 1360 | { |
1361 | int cpu; | 1361 | int cpu; |
1362 | u64 mask; | 1362 | u64 mask; |
1363 | union octeon_ciu_chip_data cd; | 1363 | union octeon_ciu_chip_data cd; |
1364 | 1364 | ||
1365 | cd.p = irq_data_get_irq_chip_data(data); | 1365 | cd.p = irq_data_get_irq_chip_data(data); |
1366 | mask = 1ull << (cd.s.bit); | 1366 | mask = 1ull << (cd.s.bit); |
1367 | 1367 | ||
1368 | for_each_online_cpu(cpu) { | 1368 | for_each_online_cpu(cpu) { |
1369 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1369 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); |
1370 | cvmx_write_csr(en_addr, mask); | 1370 | cvmx_write_csr(en_addr, mask); |
1371 | } | 1371 | } |
1372 | } | 1372 | } |
1373 | 1373 | ||
1374 | static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) | 1374 | static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) |
1375 | { | 1375 | { |
1376 | int cpu; | 1376 | int cpu; |
1377 | u64 mask; | 1377 | u64 mask; |
1378 | 1378 | ||
1379 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1379 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1380 | 1380 | ||
1381 | for_each_online_cpu(cpu) { | 1381 | for_each_online_cpu(cpu) { |
1382 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); | 1382 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); |
1383 | cvmx_write_csr(en_addr, mask); | 1383 | cvmx_write_csr(en_addr, mask); |
1384 | } | 1384 | } |
1385 | } | 1385 | } |
1386 | 1386 | ||
1387 | static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) | 1387 | static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) |
1388 | { | 1388 | { |
1389 | int cpu; | 1389 | int cpu; |
1390 | u64 mask; | 1390 | u64 mask; |
1391 | 1391 | ||
1392 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1392 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1393 | 1393 | ||
1394 | for_each_online_cpu(cpu) { | 1394 | for_each_online_cpu(cpu) { |
1395 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); | 1395 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); |
1396 | cvmx_write_csr(en_addr, mask); | 1396 | cvmx_write_csr(en_addr, mask); |
1397 | } | 1397 | } |
1398 | } | 1398 | } |
1399 | 1399 | ||
1400 | static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data) | 1400 | static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data) |
1401 | { | 1401 | { |
1402 | u64 mask; | 1402 | u64 mask; |
1403 | u64 en_addr; | 1403 | u64 en_addr; |
1404 | int coreid = cvmx_get_core_num(); | 1404 | int coreid = cvmx_get_core_num(); |
1405 | 1405 | ||
1406 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1406 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1407 | en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid); | 1407 | en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid); |
1408 | cvmx_write_csr(en_addr, mask); | 1408 | cvmx_write_csr(en_addr, mask); |
1409 | } | 1409 | } |
1410 | 1410 | ||
1411 | static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data) | 1411 | static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data) |
1412 | { | 1412 | { |
1413 | u64 mask; | 1413 | u64 mask; |
1414 | u64 en_addr; | 1414 | u64 en_addr; |
1415 | int coreid = cvmx_get_core_num(); | 1415 | int coreid = cvmx_get_core_num(); |
1416 | 1416 | ||
1417 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1417 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1418 | en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid); | 1418 | en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid); |
1419 | cvmx_write_csr(en_addr, mask); | 1419 | cvmx_write_csr(en_addr, mask); |
1420 | } | 1420 | } |
1421 | 1421 | ||
1422 | #ifdef CONFIG_SMP | 1422 | #ifdef CONFIG_SMP |
1423 | static int octeon_irq_ciu2_set_affinity(struct irq_data *data, | 1423 | static int octeon_irq_ciu2_set_affinity(struct irq_data *data, |
1424 | const struct cpumask *dest, bool force) | 1424 | const struct cpumask *dest, bool force) |
1425 | { | 1425 | { |
1426 | int cpu; | 1426 | int cpu; |
1427 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 1427 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
1428 | u64 mask; | 1428 | u64 mask; |
1429 | union octeon_ciu_chip_data cd; | 1429 | union octeon_ciu_chip_data cd; |
1430 | 1430 | ||
1431 | if (!enable_one) | 1431 | if (!enable_one) |
1432 | return 0; | 1432 | return 0; |
1433 | 1433 | ||
1434 | cd.p = irq_data_get_irq_chip_data(data); | 1434 | cd.p = irq_data_get_irq_chip_data(data); |
1435 | mask = 1ull << cd.s.bit; | 1435 | mask = 1ull << cd.s.bit; |
1436 | 1436 | ||
1437 | for_each_online_cpu(cpu) { | 1437 | for_each_online_cpu(cpu) { |
1438 | u64 en_addr; | 1438 | u64 en_addr; |
1439 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 1439 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
1440 | enable_one = false; | 1440 | enable_one = false; |
1441 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1441 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); |
1442 | } else { | 1442 | } else { |
1443 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1443 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); |
1444 | } | 1444 | } |
1445 | cvmx_write_csr(en_addr, mask); | 1445 | cvmx_write_csr(en_addr, mask); |
1446 | } | 1446 | } |
1447 | 1447 | ||
1448 | return 0; | 1448 | return 0; |
1449 | } | 1449 | } |
1450 | #endif | 1450 | #endif |
1451 | 1451 | ||
/* Configure the GPIO line for the irq, then enable it in the CIU2. */
static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}
1457 | 1457 | ||
1458 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) | 1458 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) |
1459 | { | 1459 | { |
1460 | union octeon_ciu_chip_data cd; | 1460 | union octeon_ciu_chip_data cd; |
1461 | cd.p = irq_data_get_irq_chip_data(data); | 1461 | cd.p = irq_data_get_irq_chip_data(data); |
1462 | 1462 | ||
1463 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 1463 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); |
1464 | 1464 | ||
1465 | octeon_irq_ciu2_disable_all(data); | 1465 | octeon_irq_ciu2_disable_all(data); |
1466 | } | 1466 | } |
1467 | 1467 | ||
/* irq_chip for edge-style CIU2 interrupts (mask/unmask per core). */
static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
1480 | 1480 | ||
/*
 * irq_chip for the per-cpu CIU2 mailbox interrupts.  Enable/disable
 * act on all cpus; ack/eoi mask and re-enable on the local cpu only.
 * NOTE(review): IRQCHIP_ONOFFLINE_ENABLED appears to make the
 * cpu_online/cpu_offline callbacks run only while the irq is enabled —
 * confirm against the flag's definition in linux/irq.h.
 */
static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
1492 | 1492 | ||
/*
 * irq_chip for the per-core watchdog interrupts (mapped onto CIU2
 * line 1 by octeon_irq_init_ciu2()).  Watchdogs are local to a core,
 * so there is no affinity callback.
 */
static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};
1500 | 1500 | ||
/*
 * irq_chip for GPIO interrupts routed through CIU2 (line 7).
 * IRQCHIP_SET_TYPE_MASKED keeps the irq masked while the trigger
 * type is being changed.
 */
static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
1515 | 1515 | ||
1516 | static int octeon_irq_ciu2_xlat(struct irq_domain *d, | 1516 | static int octeon_irq_ciu2_xlat(struct irq_domain *d, |
1517 | struct device_node *node, | 1517 | struct device_node *node, |
1518 | const u32 *intspec, | 1518 | const u32 *intspec, |
1519 | unsigned int intsize, | 1519 | unsigned int intsize, |
1520 | unsigned long *out_hwirq, | 1520 | unsigned long *out_hwirq, |
1521 | unsigned int *out_type) | 1521 | unsigned int *out_type) |
1522 | { | 1522 | { |
1523 | unsigned int ciu, bit; | 1523 | unsigned int ciu, bit; |
1524 | 1524 | ||
1525 | ciu = intspec[0]; | 1525 | ciu = intspec[0]; |
1526 | bit = intspec[1]; | 1526 | bit = intspec[1]; |
1527 | 1527 | ||
1528 | *out_hwirq = (ciu << 6) | bit; | 1528 | *out_hwirq = (ciu << 6) | bit; |
1529 | *out_type = 0; | 1529 | *out_type = 0; |
1530 | 1530 | ||
1531 | return 0; | 1531 | return 0; |
1532 | } | 1532 | } |
1533 | 1533 | ||
/*
 * Report whether a (line, bit) CIU2 source is edge triggered.
 * Only a handful of sources on the MIO line (3) and the PKT line (6)
 * are edge triggered; everything else is level triggered.
 */
static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	if (line == 3)				/* MIO */
		return bit == 2 ||		/* IPD_DRP */
		       (bit >= 8 && bit <= 11) ||	/* Timers */
		       bit == 48;		/* PTP */

	if (line == 6)				/* PKT */
		return (bit >= 52 && bit <= 53) ||	/* ILK_DRP */
		       (bit >= 8 && bit <= 12);		/* GMX_DRP */

	return false;
}
1559 | 1559 | ||
1560 | static int octeon_irq_ciu2_map(struct irq_domain *d, | 1560 | static int octeon_irq_ciu2_map(struct irq_domain *d, |
1561 | unsigned int virq, irq_hw_number_t hw) | 1561 | unsigned int virq, irq_hw_number_t hw) |
1562 | { | 1562 | { |
1563 | unsigned int line = hw >> 6; | 1563 | unsigned int line = hw >> 6; |
1564 | unsigned int bit = hw & 63; | 1564 | unsigned int bit = hw & 63; |
1565 | 1565 | ||
1566 | if (!octeon_irq_virq_in_range(virq)) | 1566 | if (!octeon_irq_virq_in_range(virq)) |
1567 | return -EINVAL; | 1567 | return -EINVAL; |
1568 | 1568 | ||
1569 | /* | 1569 | /* |
1570 | * Don't map irq if it is reserved for GPIO. | 1570 | * Don't map irq if it is reserved for GPIO. |
1571 | * (Line 7 are the GPIO lines.) | 1571 | * (Line 7 are the GPIO lines.) |
1572 | */ | 1572 | */ |
1573 | if (line == 7) | 1573 | if (line == 7) |
1574 | return 0; | 1574 | return 0; |
1575 | 1575 | ||
1576 | if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0) | 1576 | if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0) |
1577 | return -EINVAL; | 1577 | return -EINVAL; |
1578 | 1578 | ||
1579 | if (octeon_irq_ciu2_is_edge(line, bit)) | 1579 | if (octeon_irq_ciu2_is_edge(line, bit)) |
1580 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1580 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1581 | &octeon_irq_chip_ciu2, | 1581 | &octeon_irq_chip_ciu2, |
1582 | handle_edge_irq); | 1582 | handle_edge_irq); |
1583 | else | 1583 | else |
1584 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1584 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1585 | &octeon_irq_chip_ciu2, | 1585 | &octeon_irq_chip_ciu2, |
1586 | handle_level_irq); | 1586 | handle_level_irq); |
1587 | 1587 | ||
1588 | return 0; | 1588 | return 0; |
1589 | } | 1589 | } |
/*
 * irq_domain map callback for the CIU2 GPIO sub-domain: delegate to
 * the common GPIO mapper with line 7 (the GPIO line on CIU2) and the
 * CIU2 GPIO irq chip.
 */
static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
				    unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);
}
1595 | 1595 | ||
/* Domain ops for the main CIU2 interrupt domain. */
static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.xlate = octeon_irq_ciu2_xlat,
};
1600 | 1600 | ||
/* Domain ops for the CIU2 GPIO sub-domain (hwirqs on line 7). */
static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
	.map = octeon_irq_ciu2_gpio_map,
	.xlate = octeon_irq_gpio_xlat,
};
1605 | 1605 | ||
/*
 * IP2 dispatch for CIU2: read this core's summary register to find
 * the highest pending line, then that line's source register to find
 * the highest pending bit, and hand the mapped Linux irq to do_IRQ().
 * Anything unmapped or already-clear is treated as spurious.
 */
static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	/* Only the low 8 bits of the IP2 summary carry CIU2 lines. */
	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	/* Service the highest numbered pending line first. */
	line = fls64(sum) - 1;
	/* Source registers for consecutive lines are spaced 0x1000 apart. */
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an errata that accessing the ACK registers
	   can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}
1645 | 1645 | ||
/*
 * IP3 (mailbox) dispatch for CIU2.  The four mailbox summary bits
 * occupy the top nibble (bits 60-63) of the IP3 summary register;
 * the highest pending one is dispatched as OCTEON_IRQ_MBOX0 + line.
 */
static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an errata that accessing the ACK registers
	   can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}
1672 | 1672 | ||
/*
 * Boot-time initialization of the CIU2 interrupt controller (CN68XX):
 * install the per-cpu state and dispatch handlers, create the irq
 * domains from the device tree, set up the fixed (line, bit) mappings
 * for workqueues, watchdogs, timers, USB, PCI and the mailboxes, and
 * finally enable the CPU interrupt lines.
 */
static void __init octeon_irq_init_ciu2(void)
{
	unsigned int i;
	struct device_node *gpio_node;
	struct device_node *ciu_node;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	/* Route the CPU interrupt inputs to the CIU2 dispatchers. */
	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
	if (gpio_node) {
		struct octeon_irq_gpio_domain_data *gpiod;

		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
		if (gpiod) {
			/* gpio domain host_data is the base hwirq number. */
			gpiod->base_hwirq = 7 << 6;
			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod);
			of_node_put(gpio_node);
		} else
			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
	} else
		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2");
	if (ciu_node) {
		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
		irq_set_default_host(ciu_domain);
		of_node_put(ciu_node);
	} else
		panic("Cannot find device node for cavium,octeon-6880-ciu2.");

	/* CUI2 */
	/* Workqueue interrupts: line 0, bits 0-63. */
	for (i = 0; i < 64; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);

	/* Per-core watchdogs: line 1, with their own irq chip. */
	for (i = 0; i < 32; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
					   &octeon_irq_chip_ciu2_wd, handle_level_irq);

	/* General purpose timers: line 3, bits 8-11. */
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);

	/* PCI INTA-D: line 4, bits 0-3. */
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);

	/* PCI MSI: line 4, bits 8-11. */
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);

	/* The four mailbox irqs are per-cpu and use the mailbox chip. */
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}
1741 | 1741 | ||
/*
 * Arch entry point for interrupt setup.  Chooses the CIU2 controller
 * on CN68XX and the original CIU on all other OCTEON models.
 */
void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		octeon_irq_init_ciu2();
	else
		octeon_irq_init_ciu();
}
1754 | 1754 | ||
/*
 * Top-level interrupt dispatch.  Loops until no enabled cause bits
 * remain pending, dispatching in fixed priority order: IP2 (CIU
 * normal), IP3 (mailbox), IP4, then any remaining MIPS CPU irq.
 */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		/* Keep only causes whose interrupt-mask bit is set. */
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2))
			octeon_irq_ip2();
		else if (unlikely(cop0_cause & STATUSF_IP3))
			octeon_irq_ip3();
		else if (unlikely(cop0_cause & STATUSF_IP4))
			octeon_irq_ip4();
		else if (likely(cop0_cause))
			/* Remaining causes map onto the MIPS CPU irq range. */
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}
1778 | 1778 | ||
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Called when a cpu is taken offline: run the irq_cpu_offline
 * callback of every irq chip so interrupts are moved away from or
 * masked on the departing cpu.
 */
void octeon_fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */
1787 | 1787 |
1 | /* interrupt.h */ | 1 | /* interrupt.h */ |
2 | #ifndef _LINUX_INTERRUPT_H | 2 | #ifndef _LINUX_INTERRUPT_H |
3 | #define _LINUX_INTERRUPT_H | 3 | #define _LINUX_INTERRUPT_H |
4 | 4 | ||
5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
6 | #include <linux/linkage.h> | 6 | #include <linux/linkage.h> |
7 | #include <linux/bitops.h> | 7 | #include <linux/bitops.h> |
8 | #include <linux/preempt.h> | 8 | #include <linux/preempt.h> |
9 | #include <linux/cpumask.h> | 9 | #include <linux/cpumask.h> |
10 | #include <linux/irqreturn.h> | 10 | #include <linux/irqreturn.h> |
11 | #include <linux/irqnr.h> | 11 | #include <linux/irqnr.h> |
12 | #include <linux/hardirq.h> | 12 | #include <linux/hardirq.h> |
13 | #include <linux/irqflags.h> | 13 | #include <linux/irqflags.h> |
14 | #include <linux/hrtimer.h> | 14 | #include <linux/hrtimer.h> |
15 | #include <linux/kref.h> | 15 | #include <linux/kref.h> |
16 | #include <linux/workqueue.h> | 16 | #include <linux/workqueue.h> |
17 | 17 | ||
18 | #include <linux/atomic.h> | 18 | #include <linux/atomic.h> |
19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
20 | #include <asm/irq.h> | 20 | #include <asm/irq.h> |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * These correspond to the IORESOURCE_IRQ_* defines in | 23 | * These correspond to the IORESOURCE_IRQ_* defines in |
24 | * linux/ioport.h to select the interrupt line behaviour. When | 24 | * linux/ioport.h to select the interrupt line behaviour. When |
25 | * requesting an interrupt without specifying a IRQF_TRIGGER, the | 25 | * requesting an interrupt without specifying a IRQF_TRIGGER, the |
26 | * setting should be assumed to be "as already configured", which | 26 | * setting should be assumed to be "as already configured", which |
27 | * may be as per machine or firmware initialisation. | 27 | * may be as per machine or firmware initialisation. |
28 | */ | 28 | */ |
29 | #define IRQF_TRIGGER_NONE 0x00000000 | 29 | #define IRQF_TRIGGER_NONE 0x00000000 |
30 | #define IRQF_TRIGGER_RISING 0x00000001 | 30 | #define IRQF_TRIGGER_RISING 0x00000001 |
31 | #define IRQF_TRIGGER_FALLING 0x00000002 | 31 | #define IRQF_TRIGGER_FALLING 0x00000002 |
32 | #define IRQF_TRIGGER_HIGH 0x00000004 | 32 | #define IRQF_TRIGGER_HIGH 0x00000004 |
33 | #define IRQF_TRIGGER_LOW 0x00000008 | 33 | #define IRQF_TRIGGER_LOW 0x00000008 |
34 | #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ | 34 | #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ |
35 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) | 35 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) |
36 | #define IRQF_TRIGGER_PROBE 0x00000010 | 36 | #define IRQF_TRIGGER_PROBE 0x00000010 |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * These flags used only by the kernel as part of the | 39 | * These flags used only by the kernel as part of the |
40 | * irq handling routines. | 40 | * irq handling routines. |
41 | * | 41 | * |
42 | * IRQF_DISABLED - keep irqs disabled when calling the action handler. | 42 | * IRQF_DISABLED - keep irqs disabled when calling the action handler. |
43 | * DEPRECATED. This flag is a NOOP and scheduled to be removed | 43 | * DEPRECATED. This flag is a NOOP and scheduled to be removed |
44 | * IRQF_SHARED - allow sharing the irq among several devices | 44 | * IRQF_SHARED - allow sharing the irq among several devices |
45 | * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur | 45 | * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur |
46 | * IRQF_TIMER - Flag to mark this interrupt as timer interrupt | 46 | * IRQF_TIMER - Flag to mark this interrupt as timer interrupt |
47 | * IRQF_PERCPU - Interrupt is per cpu | 47 | * IRQF_PERCPU - Interrupt is per cpu |
48 | * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing | 48 | * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing |
49 | * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is | 49 | * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is |
50 | * registered first in an shared interrupt is considered for | 50 | * registered first in an shared interrupt is considered for |
51 | * performance reasons) | 51 | * performance reasons) |
52 | * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. | 52 | * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. |
53 | * Used by threaded interrupts which need to keep the | 53 | * Used by threaded interrupts which need to keep the |
54 | * irq line disabled until the threaded handler has been run. | 54 | * irq line disabled until the threaded handler has been run. |
55 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend | 55 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend |
56 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set | 56 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set |
57 | * IRQF_NO_THREAD - Interrupt cannot be threaded | 57 | * IRQF_NO_THREAD - Interrupt cannot be threaded |
58 | * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device | 58 | * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device |
59 | * resume time. | 59 | * resume time. |
60 | */ | 60 | */ |
61 | #define IRQF_DISABLED 0x00000020 | 61 | #define IRQF_DISABLED 0x00000020 |
62 | #define IRQF_SHARED 0x00000080 | 62 | #define IRQF_SHARED 0x00000080 |
63 | #define IRQF_PROBE_SHARED 0x00000100 | 63 | #define IRQF_PROBE_SHARED 0x00000100 |
64 | #define __IRQF_TIMER 0x00000200 | 64 | #define __IRQF_TIMER 0x00000200 |
65 | #define IRQF_PERCPU 0x00000400 | 65 | #define IRQF_PERCPU 0x00000400 |
66 | #define IRQF_NOBALANCING 0x00000800 | 66 | #define IRQF_NOBALANCING 0x00000800 |
67 | #define IRQF_IRQPOLL 0x00001000 | 67 | #define IRQF_IRQPOLL 0x00001000 |
68 | #define IRQF_ONESHOT 0x00002000 | 68 | #define IRQF_ONESHOT 0x00002000 |
69 | #define IRQF_NO_SUSPEND 0x00004000 | 69 | #define IRQF_NO_SUSPEND 0x00004000 |
70 | #define IRQF_FORCE_RESUME 0x00008000 | 70 | #define IRQF_FORCE_RESUME 0x00008000 |
71 | #define IRQF_NO_THREAD 0x00010000 | 71 | #define IRQF_NO_THREAD 0x00010000 |
72 | #define IRQF_EARLY_RESUME 0x00020000 | 72 | #define IRQF_EARLY_RESUME 0x00020000 |
73 | 73 | ||
74 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) | 74 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) |
75 | 75 | ||
76 | /* | 76 | /* |
77 | * These values can be returned by request_any_context_irq() and | 77 | * These values can be returned by request_any_context_irq() and |
78 | * describe the context the interrupt will be run in. | 78 | * describe the context the interrupt will be run in. |
79 | * | 79 | * |
80 | * IRQC_IS_HARDIRQ - interrupt runs in hardirq context | 80 | * IRQC_IS_HARDIRQ - interrupt runs in hardirq context |
81 | * IRQC_IS_NESTED - interrupt runs in a nested threaded context | 81 | * IRQC_IS_NESTED - interrupt runs in a nested threaded context |
82 | */ | 82 | */ |
83 | enum { | 83 | enum { |
84 | IRQC_IS_HARDIRQ = 0, | 84 | IRQC_IS_HARDIRQ = 0, |
85 | IRQC_IS_NESTED, | 85 | IRQC_IS_NESTED, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | typedef irqreturn_t (*irq_handler_t)(int, void *); | 88 | typedef irqreturn_t (*irq_handler_t)(int, void *); |
89 | 89 | ||
90 | /** | 90 | /** |
91 | * struct irqaction - per interrupt action descriptor | 91 | * struct irqaction - per interrupt action descriptor |
92 | * @handler: interrupt handler function | 92 | * @handler: interrupt handler function |
93 | * @name: name of the device | 93 | * @name: name of the device |
94 | * @dev_id: cookie to identify the device | 94 | * @dev_id: cookie to identify the device |
95 | * @percpu_dev_id: cookie to identify the device | 95 | * @percpu_dev_id: cookie to identify the device |
96 | * @next: pointer to the next irqaction for shared interrupts | 96 | * @next: pointer to the next irqaction for shared interrupts |
97 | * @irq: interrupt number | 97 | * @irq: interrupt number |
98 | * @flags: flags (see IRQF_* above) | 98 | * @flags: flags (see IRQF_* above) |
99 | * @thread_fn: interrupt handler function for threaded interrupts | 99 | * @thread_fn: interrupt handler function for threaded interrupts |
100 | * @thread: thread pointer for threaded interrupts | 100 | * @thread: thread pointer for threaded interrupts |
101 | * @thread_flags: flags related to @thread | 101 | * @thread_flags: flags related to @thread |
102 | * @thread_mask: bitmask for keeping track of @thread activity | 102 | * @thread_mask: bitmask for keeping track of @thread activity |
103 | * @dir: pointer to the proc/irq/NN/name entry | 103 | * @dir: pointer to the proc/irq/NN/name entry |
104 | */ | 104 | */ |
/*
 * Field semantics are documented in the kerneldoc comment above.
 * NOTE(review): the struct is internode-cacheline aligned under SMP
 * (see the trailing attribute); the field order presumably groups the
 * members read in the hot interrupt path first — confirm before
 * reordering.
 */
struct irqaction {
	irq_handler_t handler;
	void *dev_id;
	void __percpu *percpu_dev_id;
	struct irqaction *next;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned int irq;
	unsigned int flags;
	unsigned long thread_flags;
	unsigned long thread_mask;
	const char *name;
	struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;
119 | 119 | ||
120 | extern irqreturn_t no_action(int cpl, void *dev_id); | 120 | extern irqreturn_t no_action(int cpl, void *dev_id); |
121 | 121 | ||
122 | extern int __must_check | 122 | extern int __must_check |
123 | request_threaded_irq(unsigned int irq, irq_handler_t handler, | 123 | request_threaded_irq(unsigned int irq, irq_handler_t handler, |
124 | irq_handler_t thread_fn, | 124 | irq_handler_t thread_fn, |
125 | unsigned long flags, const char *name, void *dev); | 125 | unsigned long flags, const char *name, void *dev); |
126 | 126 | ||
/*
 * Register a non-threaded interrupt handler: a thin wrapper around
 * request_threaded_irq() with no thread function.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
133 | 133 | ||
134 | extern int __must_check | 134 | extern int __must_check |
135 | request_any_context_irq(unsigned int irq, irq_handler_t handler, | 135 | request_any_context_irq(unsigned int irq, irq_handler_t handler, |
136 | unsigned long flags, const char *name, void *dev_id); | 136 | unsigned long flags, const char *name, void *dev_id); |
137 | 137 | ||
138 | extern int __must_check | 138 | extern int __must_check |
139 | request_percpu_irq(unsigned int irq, irq_handler_t handler, | 139 | request_percpu_irq(unsigned int irq, irq_handler_t handler, |
140 | const char *devname, void __percpu *percpu_dev_id); | 140 | const char *devname, void __percpu *percpu_dev_id); |
141 | 141 | ||
142 | extern void free_irq(unsigned int, void *); | 142 | extern void free_irq(unsigned int, void *); |
143 | extern void free_percpu_irq(unsigned int, void __percpu *); | 143 | extern void free_percpu_irq(unsigned int, void __percpu *); |
144 | 144 | ||
145 | struct device; | 145 | struct device; |
146 | 146 | ||
147 | extern int __must_check | 147 | extern int __must_check |
148 | devm_request_threaded_irq(struct device *dev, unsigned int irq, | 148 | devm_request_threaded_irq(struct device *dev, unsigned int irq, |
149 | irq_handler_t handler, irq_handler_t thread_fn, | 149 | irq_handler_t handler, irq_handler_t thread_fn, |
150 | unsigned long irqflags, const char *devname, | 150 | unsigned long irqflags, const char *devname, |
151 | void *dev_id); | 151 | void *dev_id); |
152 | 152 | ||
153 | static inline int __must_check | 153 | static inline int __must_check |
154 | devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, | 154 | devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, |
155 | unsigned long irqflags, const char *devname, void *dev_id) | 155 | unsigned long irqflags, const char *devname, void *dev_id) |
156 | { | 156 | { |
157 | return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, | 157 | return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, |
158 | devname, dev_id); | 158 | devname, dev_id); |
159 | } | 159 | } |
160 | 160 | ||
161 | extern int __must_check | 161 | extern int __must_check |
162 | devm_request_any_context_irq(struct device *dev, unsigned int irq, | 162 | devm_request_any_context_irq(struct device *dev, unsigned int irq, |
163 | irq_handler_t handler, unsigned long irqflags, | 163 | irq_handler_t handler, unsigned long irqflags, |
164 | const char *devname, void *dev_id); | 164 | const char *devname, void *dev_id); |
165 | 165 | ||
166 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); | 166 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); |
167 | 167 | ||
168 | /* | 168 | /* |
169 | * On lockdep we don't want to enable hardirqs in hardirq | 169 | * On lockdep we don't want to enable hardirqs in hardirq |
170 | * context. Use local_irq_enable_in_hardirq() to annotate | 170 | * context. Use local_irq_enable_in_hardirq() to annotate |
171 | * kernel code that has to do this nevertheless (pretty much | 171 | * kernel code that has to do this nevertheless (pretty much |
172 | * the only valid case is for old/broken hardware that is | 172 | * the only valid case is for old/broken hardware that is |
173 | * insanely slow). | 173 | * insanely slow). |
174 | * | 174 | * |
175 | * NOTE: in theory this might break fragile code that relies | 175 | * NOTE: in theory this might break fragile code that relies |
176 | * on hardirq delivery - in practice we don't seem to have such | 176 | * on hardirq delivery - in practice we don't seem to have such |
177 | * places left. So the only effect should be slightly increased | 177 | * places left. So the only effect should be slightly increased |
178 | * irqs-off latencies. | 178 | * irqs-off latencies. |
179 | */ | 179 | */ |
180 | #ifdef CONFIG_LOCKDEP | 180 | #ifdef CONFIG_LOCKDEP |
181 | # define local_irq_enable_in_hardirq() do { } while (0) | 181 | # define local_irq_enable_in_hardirq() do { } while (0) |
182 | #else | 182 | #else |
183 | # define local_irq_enable_in_hardirq() local_irq_enable() | 183 | # define local_irq_enable_in_hardirq() local_irq_enable() |
184 | #endif | 184 | #endif |
185 | 185 | ||
186 | extern void disable_irq_nosync(unsigned int irq); | 186 | extern void disable_irq_nosync(unsigned int irq); |
187 | extern void disable_irq(unsigned int irq); | 187 | extern void disable_irq(unsigned int irq); |
188 | extern void disable_percpu_irq(unsigned int irq); | 188 | extern void disable_percpu_irq(unsigned int irq); |
189 | extern void enable_irq(unsigned int irq); | 189 | extern void enable_irq(unsigned int irq); |
190 | extern void enable_percpu_irq(unsigned int irq, unsigned int type); | 190 | extern void enable_percpu_irq(unsigned int irq, unsigned int type); |
191 | extern void irq_wake_thread(unsigned int irq, void *dev_id); | 191 | extern void irq_wake_thread(unsigned int irq, void *dev_id); |
192 | 192 | ||
193 | /* The following three functions are for the core kernel use only. */ | 193 | /* The following three functions are for the core kernel use only. */ |
194 | extern void suspend_device_irqs(void); | 194 | extern void suspend_device_irqs(void); |
195 | extern void resume_device_irqs(void); | 195 | extern void resume_device_irqs(void); |
196 | #ifdef CONFIG_PM_SLEEP | 196 | #ifdef CONFIG_PM_SLEEP |
197 | extern int check_wakeup_irqs(void); | 197 | extern int check_wakeup_irqs(void); |
198 | #else | 198 | #else |
199 | static inline int check_wakeup_irqs(void) { return 0; } | 199 | static inline int check_wakeup_irqs(void) { return 0; } |
200 | #endif | 200 | #endif |
201 | 201 | ||
202 | #if defined(CONFIG_SMP) | 202 | #if defined(CONFIG_SMP) |
203 | 203 | ||
204 | extern cpumask_var_t irq_default_affinity; | 204 | extern cpumask_var_t irq_default_affinity; |
205 | 205 | ||
206 | extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); | 206 | /* Internal implementation. Use the helpers below */ |
207 | extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, | ||
208 | bool force); | ||
209 | |||
210 | /** | ||
211 | * irq_set_affinity - Set the irq affinity of a given irq | ||
212 | * @irq: Interrupt to set affinity | ||
213 | * @cpumask: cpumask | ||
214 | * | ||
215 | * Fails if cpumask does not contain an online CPU | ||
216 | */ | ||
217 | static inline int | ||
218 | irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | ||
219 | { | ||
220 | return __irq_set_affinity(irq, cpumask, false); | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * irq_force_affinity - Force the irq affinity of a given irq | ||
225 | * @irq: Interrupt to set affinity | ||
226 | * @cpumask: cpumask | ||
227 | * | ||
228 | * Same as irq_set_affinity, but without checking the mask against | ||
229 | * online cpus. | ||
230 | * | ||
231 | * Solely for low level cpu hotplug code, where we need to make per | ||
232 | * cpu interrupts affine before the cpu becomes online. | ||
233 | */ | ||
234 | static inline int | ||
235 | irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) | ||
236 | { | ||
237 | return __irq_set_affinity(irq, cpumask, true); | ||
238 | } | ||
239 | |||
207 | extern int irq_can_set_affinity(unsigned int irq); | 240 | extern int irq_can_set_affinity(unsigned int irq); |
208 | extern int irq_select_affinity(unsigned int irq); | 241 | extern int irq_select_affinity(unsigned int irq); |
209 | 242 | ||
210 | extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); | 243 | extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); |
211 | 244 | ||
212 | /** | 245 | /** |
213 | * struct irq_affinity_notify - context for notification of IRQ affinity changes | 246 | * struct irq_affinity_notify - context for notification of IRQ affinity changes |
214 | * @irq: Interrupt to which notification applies | 247 | * @irq: Interrupt to which notification applies |
215 | * @kref: Reference count, for internal use | 248 | * @kref: Reference count, for internal use |
216 | * @work: Work item, for internal use | 249 | * @work: Work item, for internal use |
217 | * @notify: Function to be called on change. This will be | 250 | * @notify: Function to be called on change. This will be |
218 | * called in process context. | 251 | * called in process context. |
219 | * @release: Function to be called on release. This will be | 252 | * @release: Function to be called on release. This will be |
220 | * called in process context. Once registered, the | 253 | * called in process context. Once registered, the |
221 | * structure must only be freed when this function is | 254 | * structure must only be freed when this function is |
222 | * called or later. | 255 | * called or later. |
223 | */ | 256 | */ |
224 | struct irq_affinity_notify { | 257 | struct irq_affinity_notify { |
225 | unsigned int irq; | 258 | unsigned int irq; |
226 | struct kref kref; | 259 | struct kref kref; |
227 | struct work_struct work; | 260 | struct work_struct work; |
228 | void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); | 261 | void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); |
229 | void (*release)(struct kref *ref); | 262 | void (*release)(struct kref *ref); |
230 | }; | 263 | }; |
231 | 264 | ||
232 | extern int | 265 | extern int |
233 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); | 266 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); |
234 | 267 | ||
235 | #else /* CONFIG_SMP */ | 268 | #else /* CONFIG_SMP */ |
236 | 269 | ||
237 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) | 270 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) |
238 | { | 271 | { |
239 | return -EINVAL; | 272 | return -EINVAL; |
240 | } | 273 | } |
241 | 274 | ||
242 | static inline int irq_can_set_affinity(unsigned int irq) | 275 | static inline int irq_can_set_affinity(unsigned int irq) |
243 | { | 276 | { |
244 | return 0; | 277 | return 0; |
245 | } | 278 | } |
246 | 279 | ||
247 | static inline int irq_select_affinity(unsigned int irq) { return 0; } | 280 | static inline int irq_select_affinity(unsigned int irq) { return 0; } |
248 | 281 | ||
249 | static inline int irq_set_affinity_hint(unsigned int irq, | 282 | static inline int irq_set_affinity_hint(unsigned int irq, |
250 | const struct cpumask *m) | 283 | const struct cpumask *m) |
251 | { | 284 | { |
252 | return -EINVAL; | 285 | return -EINVAL; |
253 | } | 286 | } |
254 | #endif /* CONFIG_SMP */ | 287 | #endif /* CONFIG_SMP */ |
255 | 288 | ||
256 | /* | 289 | /* |
257 | * Special lockdep variants of irq disabling/enabling. | 290 | * Special lockdep variants of irq disabling/enabling. |
258 | * These should be used for locking constructs that | 291 | * These should be used for locking constructs that |
259 | * know that a particular irq context which is disabled, | 292 | * know that a particular irq context which is disabled, |
260 | * and which is the only irq-context user of a lock, | 293 | * and which is the only irq-context user of a lock, |
261 | * that it's safe to take the lock in the irq-disabled | 294 | * that it's safe to take the lock in the irq-disabled |
262 | * section without disabling hardirqs. | 295 | * section without disabling hardirqs. |
263 | * | 296 | * |
264 | * On !CONFIG_LOCKDEP they are equivalent to the normal | 297 | * On !CONFIG_LOCKDEP they are equivalent to the normal |
265 | * irq disable/enable methods. | 298 | * irq disable/enable methods. |
266 | */ | 299 | */ |
267 | static inline void disable_irq_nosync_lockdep(unsigned int irq) | 300 | static inline void disable_irq_nosync_lockdep(unsigned int irq) |
268 | { | 301 | { |
269 | disable_irq_nosync(irq); | 302 | disable_irq_nosync(irq); |
270 | #ifdef CONFIG_LOCKDEP | 303 | #ifdef CONFIG_LOCKDEP |
271 | local_irq_disable(); | 304 | local_irq_disable(); |
272 | #endif | 305 | #endif |
273 | } | 306 | } |
274 | 307 | ||
275 | static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) | 308 | static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) |
276 | { | 309 | { |
277 | disable_irq_nosync(irq); | 310 | disable_irq_nosync(irq); |
278 | #ifdef CONFIG_LOCKDEP | 311 | #ifdef CONFIG_LOCKDEP |
279 | local_irq_save(*flags); | 312 | local_irq_save(*flags); |
280 | #endif | 313 | #endif |
281 | } | 314 | } |
282 | 315 | ||
283 | static inline void disable_irq_lockdep(unsigned int irq) | 316 | static inline void disable_irq_lockdep(unsigned int irq) |
284 | { | 317 | { |
285 | disable_irq(irq); | 318 | disable_irq(irq); |
286 | #ifdef CONFIG_LOCKDEP | 319 | #ifdef CONFIG_LOCKDEP |
287 | local_irq_disable(); | 320 | local_irq_disable(); |
288 | #endif | 321 | #endif |
289 | } | 322 | } |
290 | 323 | ||
291 | static inline void enable_irq_lockdep(unsigned int irq) | 324 | static inline void enable_irq_lockdep(unsigned int irq) |
292 | { | 325 | { |
293 | #ifdef CONFIG_LOCKDEP | 326 | #ifdef CONFIG_LOCKDEP |
294 | local_irq_enable(); | 327 | local_irq_enable(); |
295 | #endif | 328 | #endif |
296 | enable_irq(irq); | 329 | enable_irq(irq); |
297 | } | 330 | } |
298 | 331 | ||
299 | static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) | 332 | static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) |
300 | { | 333 | { |
301 | #ifdef CONFIG_LOCKDEP | 334 | #ifdef CONFIG_LOCKDEP |
302 | local_irq_restore(*flags); | 335 | local_irq_restore(*flags); |
303 | #endif | 336 | #endif |
304 | enable_irq(irq); | 337 | enable_irq(irq); |
305 | } | 338 | } |
306 | 339 | ||
307 | /* IRQ wakeup (PM) control: */ | 340 | /* IRQ wakeup (PM) control: */ |
308 | extern int irq_set_irq_wake(unsigned int irq, unsigned int on); | 341 | extern int irq_set_irq_wake(unsigned int irq, unsigned int on); |
309 | 342 | ||
310 | static inline int enable_irq_wake(unsigned int irq) | 343 | static inline int enable_irq_wake(unsigned int irq) |
311 | { | 344 | { |
312 | return irq_set_irq_wake(irq, 1); | 345 | return irq_set_irq_wake(irq, 1); |
313 | } | 346 | } |
314 | 347 | ||
315 | static inline int disable_irq_wake(unsigned int irq) | 348 | static inline int disable_irq_wake(unsigned int irq) |
316 | { | 349 | { |
317 | return irq_set_irq_wake(irq, 0); | 350 | return irq_set_irq_wake(irq, 0); |
318 | } | 351 | } |
319 | 352 | ||
320 | 353 | ||
321 | #ifdef CONFIG_IRQ_FORCED_THREADING | 354 | #ifdef CONFIG_IRQ_FORCED_THREADING |
322 | extern bool force_irqthreads; | 355 | extern bool force_irqthreads; |
323 | #else | 356 | #else |
324 | #define force_irqthreads (0) | 357 | #define force_irqthreads (0) |
325 | #endif | 358 | #endif |
326 | 359 | ||
327 | #ifndef __ARCH_SET_SOFTIRQ_PENDING | 360 | #ifndef __ARCH_SET_SOFTIRQ_PENDING |
328 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) | 361 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) |
329 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) | 362 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) |
330 | #endif | 363 | #endif |
331 | 364 | ||
332 | /* Some architectures might implement lazy enabling/disabling of | 365 | /* Some architectures might implement lazy enabling/disabling of |
333 | * interrupts. In some cases, such as stop_machine, we might want | 366 | * interrupts. In some cases, such as stop_machine, we might want |
334 | * to ensure that after a local_irq_disable(), interrupts have | 367 | * to ensure that after a local_irq_disable(), interrupts have |
335 | * really been disabled in hardware. Such architectures need to | 368 | * really been disabled in hardware. Such architectures need to |
336 | * implement the following hook. | 369 | * implement the following hook. |
337 | */ | 370 | */ |
338 | #ifndef hard_irq_disable | 371 | #ifndef hard_irq_disable |
339 | #define hard_irq_disable() do { } while(0) | 372 | #define hard_irq_disable() do { } while(0) |
340 | #endif | 373 | #endif |
341 | 374 | ||
342 | /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high | 375 | /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high |
343 | frequency threaded job scheduling. For almost all the purposes | 376 | frequency threaded job scheduling. For almost all the purposes |
344 | tasklets are more than enough. F.e. all serial device BHs et | 377 | tasklets are more than enough. F.e. all serial device BHs et |
345 | al. should be converted to tasklets, not to softirqs. | 378 | al. should be converted to tasklets, not to softirqs. |
346 | */ | 379 | */ |
347 | 380 | ||
348 | enum | 381 | enum |
349 | { | 382 | { |
350 | HI_SOFTIRQ=0, | 383 | HI_SOFTIRQ=0, |
351 | TIMER_SOFTIRQ, | 384 | TIMER_SOFTIRQ, |
352 | NET_TX_SOFTIRQ, | 385 | NET_TX_SOFTIRQ, |
353 | NET_RX_SOFTIRQ, | 386 | NET_RX_SOFTIRQ, |
354 | BLOCK_SOFTIRQ, | 387 | BLOCK_SOFTIRQ, |
355 | BLOCK_IOPOLL_SOFTIRQ, | 388 | BLOCK_IOPOLL_SOFTIRQ, |
356 | TASKLET_SOFTIRQ, | 389 | TASKLET_SOFTIRQ, |
357 | SCHED_SOFTIRQ, | 390 | SCHED_SOFTIRQ, |
358 | HRTIMER_SOFTIRQ, | 391 | HRTIMER_SOFTIRQ, |
359 | RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */ | 392 | RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */ |
360 | 393 | ||
361 | NR_SOFTIRQS | 394 | NR_SOFTIRQS |
362 | }; | 395 | }; |
363 | 396 | ||
364 | #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) | 397 | #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) |
365 | 398 | ||
366 | /* map softirq index to softirq name. update 'softirq_to_name' in | 399 | /* map softirq index to softirq name. update 'softirq_to_name' in |
367 | * kernel/softirq.c when adding a new softirq. | 400 | * kernel/softirq.c when adding a new softirq. |
368 | */ | 401 | */ |
369 | extern const char * const softirq_to_name[NR_SOFTIRQS]; | 402 | extern const char * const softirq_to_name[NR_SOFTIRQS]; |
370 | 403 | ||
371 | /* softirq mask and active fields moved to irq_cpustat_t in | 404 | /* softirq mask and active fields moved to irq_cpustat_t in |
372 | * asm/hardirq.h to get better cache usage. KAO | 405 | * asm/hardirq.h to get better cache usage. KAO |
373 | */ | 406 | */ |
374 | 407 | ||
375 | struct softirq_action | 408 | struct softirq_action |
376 | { | 409 | { |
377 | void (*action)(struct softirq_action *); | 410 | void (*action)(struct softirq_action *); |
378 | }; | 411 | }; |
379 | 412 | ||
380 | asmlinkage void do_softirq(void); | 413 | asmlinkage void do_softirq(void); |
381 | asmlinkage void __do_softirq(void); | 414 | asmlinkage void __do_softirq(void); |
382 | 415 | ||
383 | #ifdef __ARCH_HAS_DO_SOFTIRQ | 416 | #ifdef __ARCH_HAS_DO_SOFTIRQ |
384 | void do_softirq_own_stack(void); | 417 | void do_softirq_own_stack(void); |
385 | #else | 418 | #else |
386 | static inline void do_softirq_own_stack(void) | 419 | static inline void do_softirq_own_stack(void) |
387 | { | 420 | { |
388 | __do_softirq(); | 421 | __do_softirq(); |
389 | } | 422 | } |
390 | #endif | 423 | #endif |
391 | 424 | ||
392 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); | 425 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); |
393 | extern void softirq_init(void); | 426 | extern void softirq_init(void); |
394 | extern void __raise_softirq_irqoff(unsigned int nr); | 427 | extern void __raise_softirq_irqoff(unsigned int nr); |
395 | 428 | ||
396 | extern void raise_softirq_irqoff(unsigned int nr); | 429 | extern void raise_softirq_irqoff(unsigned int nr); |
397 | extern void raise_softirq(unsigned int nr); | 430 | extern void raise_softirq(unsigned int nr); |
398 | 431 | ||
399 | DECLARE_PER_CPU(struct task_struct *, ksoftirqd); | 432 | DECLARE_PER_CPU(struct task_struct *, ksoftirqd); |
400 | 433 | ||
401 | static inline struct task_struct *this_cpu_ksoftirqd(void) | 434 | static inline struct task_struct *this_cpu_ksoftirqd(void) |
402 | { | 435 | { |
403 | return this_cpu_read(ksoftirqd); | 436 | return this_cpu_read(ksoftirqd); |
404 | } | 437 | } |
405 | 438 | ||
406 | /* Tasklets --- multithreaded analogue of BHs. | 439 | /* Tasklets --- multithreaded analogue of BHs. |
407 | 440 | ||
408 | Main feature differentiating them from generic softirqs: tasklet | 441 | Main feature differentiating them from generic softirqs: tasklet |
409 | is running only on one CPU simultaneously. | 442 | is running only on one CPU simultaneously. |
410 | 443 | ||
411 | Main feature differentiating them from BHs: different tasklets | 444 | Main feature differentiating them from BHs: different tasklets |
412 | may be run simultaneously on different CPUs. | 445 | may be run simultaneously on different CPUs. |
413 | 446 | ||
414 | Properties: | 447 | Properties: |
415 | * If tasklet_schedule() is called, then tasklet is guaranteed | 448 | * If tasklet_schedule() is called, then tasklet is guaranteed |
416 | to be executed on some cpu at least once after this. | 449 | to be executed on some cpu at least once after this. |
417 | * If the tasklet is already scheduled, but its execution is still not | 450 | * If the tasklet is already scheduled, but its execution is still not |
418 | started, it will be executed only once. | 451 | started, it will be executed only once. |
419 | * If this tasklet is already running on another CPU (or schedule is called | 452 | * If this tasklet is already running on another CPU (or schedule is called |
420 | from tasklet itself), it is rescheduled for later. | 453 | from tasklet itself), it is rescheduled for later. |
421 | * Tasklet is strictly serialized wrt itself, but not | 454 | * Tasklet is strictly serialized wrt itself, but not |
422 | wrt another tasklets. If client needs some intertask synchronization, | 455 | wrt another tasklets. If client needs some intertask synchronization, |
423 | he makes it with spinlocks. | 456 | he makes it with spinlocks. |
424 | */ | 457 | */ |
425 | 458 | ||
426 | struct tasklet_struct | 459 | struct tasklet_struct |
427 | { | 460 | { |
428 | struct tasklet_struct *next; | 461 | struct tasklet_struct *next; |
429 | unsigned long state; | 462 | unsigned long state; |
430 | atomic_t count; | 463 | atomic_t count; |
431 | void (*func)(unsigned long); | 464 | void (*func)(unsigned long); |
432 | unsigned long data; | 465 | unsigned long data; |
433 | }; | 466 | }; |
434 | 467 | ||
435 | #define DECLARE_TASKLET(name, func, data) \ | 468 | #define DECLARE_TASKLET(name, func, data) \ |
436 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } | 469 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } |
437 | 470 | ||
438 | #define DECLARE_TASKLET_DISABLED(name, func, data) \ | 471 | #define DECLARE_TASKLET_DISABLED(name, func, data) \ |
439 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } | 472 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } |
440 | 473 | ||
441 | 474 | ||
442 | enum | 475 | enum |
443 | { | 476 | { |
444 | TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ | 477 | TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ |
445 | TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ | 478 | TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ |
446 | }; | 479 | }; |
447 | 480 | ||
448 | #ifdef CONFIG_SMP | 481 | #ifdef CONFIG_SMP |
449 | static inline int tasklet_trylock(struct tasklet_struct *t) | 482 | static inline int tasklet_trylock(struct tasklet_struct *t) |
450 | { | 483 | { |
451 | return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); | 484 | return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); |
452 | } | 485 | } |
453 | 486 | ||
454 | static inline void tasklet_unlock(struct tasklet_struct *t) | 487 | static inline void tasklet_unlock(struct tasklet_struct *t) |
455 | { | 488 | { |
456 | smp_mb__before_clear_bit(); | 489 | smp_mb__before_clear_bit(); |
457 | clear_bit(TASKLET_STATE_RUN, &(t)->state); | 490 | clear_bit(TASKLET_STATE_RUN, &(t)->state); |
458 | } | 491 | } |
459 | 492 | ||
460 | static inline void tasklet_unlock_wait(struct tasklet_struct *t) | 493 | static inline void tasklet_unlock_wait(struct tasklet_struct *t) |
461 | { | 494 | { |
462 | while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } | 495 | while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } |
463 | } | 496 | } |
464 | #else | 497 | #else |
465 | #define tasklet_trylock(t) 1 | 498 | #define tasklet_trylock(t) 1 |
466 | #define tasklet_unlock_wait(t) do { } while (0) | 499 | #define tasklet_unlock_wait(t) do { } while (0) |
467 | #define tasklet_unlock(t) do { } while (0) | 500 | #define tasklet_unlock(t) do { } while (0) |
468 | #endif | 501 | #endif |
469 | 502 | ||
470 | extern void __tasklet_schedule(struct tasklet_struct *t); | 503 | extern void __tasklet_schedule(struct tasklet_struct *t); |
471 | 504 | ||
472 | static inline void tasklet_schedule(struct tasklet_struct *t) | 505 | static inline void tasklet_schedule(struct tasklet_struct *t) |
473 | { | 506 | { |
474 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 507 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
475 | __tasklet_schedule(t); | 508 | __tasklet_schedule(t); |
476 | } | 509 | } |
477 | 510 | ||
478 | extern void __tasklet_hi_schedule(struct tasklet_struct *t); | 511 | extern void __tasklet_hi_schedule(struct tasklet_struct *t); |
479 | 512 | ||
480 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) | 513 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) |
481 | { | 514 | { |
482 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 515 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
483 | __tasklet_hi_schedule(t); | 516 | __tasklet_hi_schedule(t); |
484 | } | 517 | } |
485 | 518 | ||
486 | extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); | 519 | extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); |
487 | 520 | ||
488 | /* | 521 | /* |
489 | * This version avoids touching any other tasklets. Needed for kmemcheck | 522 | * This version avoids touching any other tasklets. Needed for kmemcheck |
490 | * in order not to take any page faults while enqueueing this tasklet; | 523 | * in order not to take any page faults while enqueueing this tasklet; |
491 | * consider VERY carefully whether you really need this or | 524 | * consider VERY carefully whether you really need this or |
492 | * tasklet_hi_schedule()... | 525 | * tasklet_hi_schedule()... |
493 | */ | 526 | */ |
494 | static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) | 527 | static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) |
495 | { | 528 | { |
496 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 529 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
497 | __tasklet_hi_schedule_first(t); | 530 | __tasklet_hi_schedule_first(t); |
498 | } | 531 | } |
499 | 532 | ||
500 | 533 | ||
501 | static inline void tasklet_disable_nosync(struct tasklet_struct *t) | 534 | static inline void tasklet_disable_nosync(struct tasklet_struct *t) |
502 | { | 535 | { |
503 | atomic_inc(&t->count); | 536 | atomic_inc(&t->count); |
504 | smp_mb__after_atomic_inc(); | 537 | smp_mb__after_atomic_inc(); |
505 | } | 538 | } |
506 | 539 | ||
507 | static inline void tasklet_disable(struct tasklet_struct *t) | 540 | static inline void tasklet_disable(struct tasklet_struct *t) |
508 | { | 541 | { |
509 | tasklet_disable_nosync(t); | 542 | tasklet_disable_nosync(t); |
510 | tasklet_unlock_wait(t); | 543 | tasklet_unlock_wait(t); |
511 | smp_mb(); | 544 | smp_mb(); |
512 | } | 545 | } |
513 | 546 | ||
514 | static inline void tasklet_enable(struct tasklet_struct *t) | 547 | static inline void tasklet_enable(struct tasklet_struct *t) |
515 | { | 548 | { |
516 | smp_mb__before_atomic_dec(); | 549 | smp_mb__before_atomic_dec(); |
517 | atomic_dec(&t->count); | 550 | atomic_dec(&t->count); |
518 | } | 551 | } |
519 | 552 | ||
520 | static inline void tasklet_hi_enable(struct tasklet_struct *t) | 553 | static inline void tasklet_hi_enable(struct tasklet_struct *t) |
521 | { | 554 | { |
522 | smp_mb__before_atomic_dec(); | 555 | smp_mb__before_atomic_dec(); |
523 | atomic_dec(&t->count); | 556 | atomic_dec(&t->count); |
524 | } | 557 | } |
525 | 558 | ||
526 | extern void tasklet_kill(struct tasklet_struct *t); | 559 | extern void tasklet_kill(struct tasklet_struct *t); |
527 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); | 560 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); |
528 | extern void tasklet_init(struct tasklet_struct *t, | 561 | extern void tasklet_init(struct tasklet_struct *t, |
529 | void (*func)(unsigned long), unsigned long data); | 562 | void (*func)(unsigned long), unsigned long data); |
530 | 563 | ||
531 | struct tasklet_hrtimer { | 564 | struct tasklet_hrtimer { |
532 | struct hrtimer timer; | 565 | struct hrtimer timer; |
533 | struct tasklet_struct tasklet; | 566 | struct tasklet_struct tasklet; |
534 | enum hrtimer_restart (*function)(struct hrtimer *); | 567 | enum hrtimer_restart (*function)(struct hrtimer *); |
535 | }; | 568 | }; |
536 | 569 | ||
537 | extern void | 570 | extern void |
538 | tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, | 571 | tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, |
539 | enum hrtimer_restart (*function)(struct hrtimer *), | 572 | enum hrtimer_restart (*function)(struct hrtimer *), |
540 | clockid_t which_clock, enum hrtimer_mode mode); | 573 | clockid_t which_clock, enum hrtimer_mode mode); |
541 | 574 | ||
542 | static inline | 575 | static inline |
543 | int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, | 576 | int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, |
544 | const enum hrtimer_mode mode) | 577 | const enum hrtimer_mode mode) |
545 | { | 578 | { |
546 | return hrtimer_start(&ttimer->timer, time, mode); | 579 | return hrtimer_start(&ttimer->timer, time, mode); |
547 | } | 580 | } |
548 | 581 | ||
549 | static inline | 582 | static inline |
550 | void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) | 583 | void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) |
551 | { | 584 | { |
552 | hrtimer_cancel(&ttimer->timer); | 585 | hrtimer_cancel(&ttimer->timer); |
553 | tasklet_kill(&ttimer->tasklet); | 586 | tasklet_kill(&ttimer->tasklet); |
554 | } | 587 | } |
555 | 588 | ||
556 | /* | 589 | /* |
557 | * Autoprobing for irqs: | 590 | * Autoprobing for irqs: |
558 | * | 591 | * |
559 | * probe_irq_on() and probe_irq_off() provide robust primitives | 592 | * probe_irq_on() and probe_irq_off() provide robust primitives |
560 | * for accurate IRQ probing during kernel initialization. They are | 593 | * for accurate IRQ probing during kernel initialization. They are |
561 | * reasonably simple to use, are not "fooled" by spurious interrupts, | 594 | * reasonably simple to use, are not "fooled" by spurious interrupts, |
562 | * and, unlike other attempts at IRQ probing, they do not get hung on | 595 | * and, unlike other attempts at IRQ probing, they do not get hung on |
563 | * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards). | 596 | * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards). |
564 | * | 597 | * |
565 | * For reasonably foolproof probing, use them as follows: | 598 | * For reasonably foolproof probing, use them as follows: |
566 | * | 599 | * |
567 | * 1. clear and/or mask the device's internal interrupt. | 600 | * 1. clear and/or mask the device's internal interrupt. |
568 | * 2. sti(); | 601 | * 2. sti(); |
569 | * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs | 602 | * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs |
570 | * 4. enable the device and cause it to trigger an interrupt. | 603 | * 4. enable the device and cause it to trigger an interrupt. |
571 | * 5. wait for the device to interrupt, using non-intrusive polling or a delay. | 604 | * 5. wait for the device to interrupt, using non-intrusive polling or a delay. |
572 | * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple | 605 | * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple |
573 | * 7. service the device to clear its pending interrupt. | 606 | * 7. service the device to clear its pending interrupt. |
574 | * 8. loop again if paranoia is required. | 607 | * 8. loop again if paranoia is required. |
575 | * | 608 | * |
576 | * probe_irq_on() returns a mask of allocated irqs. | 609 | * probe_irq_on() returns a mask of allocated irqs. |
577 | * | 610 | * |
578 | * probe_irq_off() takes the mask as a parameter, | 611 | * probe_irq_off() takes the mask as a parameter, |
579 | * and returns the irq number which occurred, | 612 | * and returns the irq number which occurred, |
580 | * or zero if none occurred, or a negative irq number | 613 | * or zero if none occurred, or a negative irq number |
581 | * if more than one irq occurred. | 614 | * if more than one irq occurred. |
582 | */ | 615 | */ |
583 | 616 | ||
/*
 * When the generic IRQ probing helpers are not configured, provide
 * inline stubs so callers can use them unconditionally: probing then
 * simply reports "no IRQ found".
 */
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;	/* no unassigned IRQs taken over for probing */
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;	/* no interrupt occurred */
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;	/* empty mask */
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
602 | 635 | ||
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
/* No procfs configured: nothing to set up */
static inline void init_irq_proc(void)
{
}
#endif
611 | 644 | ||
struct seq_file;
/* seq_file show method backing /proc/interrupts */
int show_interrupts(struct seq_file *p, void *v);
/*
 * Architecture hook to append arch specific interrupt lines;
 * @prec is the field width used for the irq number column.
 */
int arch_show_interrupts(struct seq_file *p, int prec);

/* Early (boot time) irq subsystem initialization hooks */
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
619 | 652 | ||
620 | #endif | 653 | #endif |
621 | 654 |
1 | #ifndef _LINUX_IRQ_H | 1 | #ifndef _LINUX_IRQ_H |
2 | #define _LINUX_IRQ_H | 2 | #define _LINUX_IRQ_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Please do not include this file in generic code. There is currently | 5 | * Please do not include this file in generic code. There is currently |
6 | * no requirement for any architecture to implement anything held | 6 | * no requirement for any architecture to implement anything held |
7 | * within this file. | 7 | * within this file. |
8 | * | 8 | * |
9 | * Thanks. --rmk | 9 | * Thanks. --rmk |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <linux/cache.h> | 14 | #include <linux/cache.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/cpumask.h> | 16 | #include <linux/cpumask.h> |
17 | #include <linux/gfp.h> | 17 | #include <linux/gfp.h> |
18 | #include <linux/irqreturn.h> | 18 | #include <linux/irqreturn.h> |
19 | #include <linux/irqnr.h> | 19 | #include <linux/irqnr.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/topology.h> | 21 | #include <linux/topology.h> |
22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
23 | 23 | ||
24 | #include <asm/irq.h> | 24 | #include <asm/irq.h> |
25 | #include <asm/ptrace.h> | 25 | #include <asm/ptrace.h> |
26 | #include <asm/irq_regs.h> | 26 | #include <asm/irq_regs.h> |
27 | 27 | ||
28 | struct seq_file; | 28 | struct seq_file; |
29 | struct module; | 29 | struct module; |
30 | struct irq_desc; | 30 | struct irq_desc; |
31 | struct irq_data; | 31 | struct irq_data; |
32 | typedef void (*irq_flow_handler_t)(unsigned int irq, | 32 | typedef void (*irq_flow_handler_t)(unsigned int irq, |
33 | struct irq_desc *desc); | 33 | struct irq_desc *desc); |
34 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); | 34 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * IRQ line status. | 37 | * IRQ line status. |
38 | * | 38 | * |
39 | * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h | 39 | * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h |
40 | * | 40 | * |
41 | * IRQ_TYPE_NONE - default, unspecified type | 41 | * IRQ_TYPE_NONE - default, unspecified type |
42 | * IRQ_TYPE_EDGE_RISING - rising edge triggered | 42 | * IRQ_TYPE_EDGE_RISING - rising edge triggered |
43 | * IRQ_TYPE_EDGE_FALLING - falling edge triggered | 43 | * IRQ_TYPE_EDGE_FALLING - falling edge triggered |
44 | * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered | 44 | * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered |
45 | * IRQ_TYPE_LEVEL_HIGH - high level triggered | 45 | * IRQ_TYPE_LEVEL_HIGH - high level triggered |
46 | * IRQ_TYPE_LEVEL_LOW - low level triggered | 46 | * IRQ_TYPE_LEVEL_LOW - low level triggered |
47 | * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits | 47 | * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits |
48 | * IRQ_TYPE_SENSE_MASK - Mask for all the above bits | 48 | * IRQ_TYPE_SENSE_MASK - Mask for all the above bits |
49 | * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type | 49 | * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type |
50 | * to setup the HW to a sane default (used | 50 | * to setup the HW to a sane default (used |
51 | * by irqdomain map() callbacks to synchronize | 51 | * by irqdomain map() callbacks to synchronize |
52 | * the HW state and SW flags for a newly | 52 | * the HW state and SW flags for a newly |
53 | * allocated descriptor). | 53 | * allocated descriptor). |
54 | * | 54 | * |
55 | * IRQ_TYPE_PROBE - Special flag for probing in progress | 55 | * IRQ_TYPE_PROBE - Special flag for probing in progress |
56 | * | 56 | * |
57 | * Bits which can be modified via irq_set/clear/modify_status_flags() | 57 | * Bits which can be modified via irq_set/clear/modify_status_flags() |
58 | * IRQ_LEVEL - Interrupt is level type. Will be also | 58 | * IRQ_LEVEL - Interrupt is level type. Will be also |
59 | * updated in the code when the above trigger | 59 | * updated in the code when the above trigger |
60 | * bits are modified via irq_set_irq_type() | 60 | * bits are modified via irq_set_irq_type() |
61 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect | 61 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect |
62 | * it from affinity setting | 62 | * it from affinity setting |
63 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing | 63 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing |
64 | * IRQ_NOREQUEST - Interrupt cannot be requested via | 64 | * IRQ_NOREQUEST - Interrupt cannot be requested via |
65 | * request_irq() | 65 | * request_irq() |
66 | * IRQ_NOTHREAD - Interrupt cannot be threaded | 66 | * IRQ_NOTHREAD - Interrupt cannot be threaded |
67 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in | 67 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in |
68 | * request/setup_irq() | 68 | * request/setup_irq() |
69 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) | 69 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) |
70 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context | 70 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context |
71 | * IRQ_NESTED_THREAD - Interrupt nests into another thread | 71 | * IRQ_NESTED_THREAD - Interrupt nests into another thread |
72 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable | 72 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable |
73 | * IRQ_IS_POLLED - Always polled by another interrupt. Exclude | 73 | * IRQ_IS_POLLED - Always polled by another interrupt. Exclude |
74 | * it from the spurious interrupt detection | 74 | * it from the spurious interrupt detection |
75 | * mechanism and from core side polling. | 75 | * mechanism and from core side polling. |
76 | */ | 76 | */ |
77 | enum { | 77 | enum { |
78 | IRQ_TYPE_NONE = 0x00000000, | 78 | IRQ_TYPE_NONE = 0x00000000, |
79 | IRQ_TYPE_EDGE_RISING = 0x00000001, | 79 | IRQ_TYPE_EDGE_RISING = 0x00000001, |
80 | IRQ_TYPE_EDGE_FALLING = 0x00000002, | 80 | IRQ_TYPE_EDGE_FALLING = 0x00000002, |
81 | IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), | 81 | IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), |
82 | IRQ_TYPE_LEVEL_HIGH = 0x00000004, | 82 | IRQ_TYPE_LEVEL_HIGH = 0x00000004, |
83 | IRQ_TYPE_LEVEL_LOW = 0x00000008, | 83 | IRQ_TYPE_LEVEL_LOW = 0x00000008, |
84 | IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), | 84 | IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), |
85 | IRQ_TYPE_SENSE_MASK = 0x0000000f, | 85 | IRQ_TYPE_SENSE_MASK = 0x0000000f, |
86 | IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK, | 86 | IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK, |
87 | 87 | ||
88 | IRQ_TYPE_PROBE = 0x00000010, | 88 | IRQ_TYPE_PROBE = 0x00000010, |
89 | 89 | ||
90 | IRQ_LEVEL = (1 << 8), | 90 | IRQ_LEVEL = (1 << 8), |
91 | IRQ_PER_CPU = (1 << 9), | 91 | IRQ_PER_CPU = (1 << 9), |
92 | IRQ_NOPROBE = (1 << 10), | 92 | IRQ_NOPROBE = (1 << 10), |
93 | IRQ_NOREQUEST = (1 << 11), | 93 | IRQ_NOREQUEST = (1 << 11), |
94 | IRQ_NOAUTOEN = (1 << 12), | 94 | IRQ_NOAUTOEN = (1 << 12), |
95 | IRQ_NO_BALANCING = (1 << 13), | 95 | IRQ_NO_BALANCING = (1 << 13), |
96 | IRQ_MOVE_PCNTXT = (1 << 14), | 96 | IRQ_MOVE_PCNTXT = (1 << 14), |
97 | IRQ_NESTED_THREAD = (1 << 15), | 97 | IRQ_NESTED_THREAD = (1 << 15), |
98 | IRQ_NOTHREAD = (1 << 16), | 98 | IRQ_NOTHREAD = (1 << 16), |
99 | IRQ_PER_CPU_DEVID = (1 << 17), | 99 | IRQ_PER_CPU_DEVID = (1 << 17), |
100 | IRQ_IS_POLLED = (1 << 18), | 100 | IRQ_IS_POLLED = (1 << 18), |
101 | }; | 101 | }; |
102 | 102 | ||
103 | #define IRQF_MODIFY_MASK \ | 103 | #define IRQF_MODIFY_MASK \ |
104 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ | 104 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ |
105 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ | 105 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ |
106 | IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ | 106 | IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ |
107 | IRQ_IS_POLLED) | 107 | IRQ_IS_POLLED) |
108 | 108 | ||
109 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) | 109 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Return value for chip->irq_set_affinity() | 112 | * Return value for chip->irq_set_affinity() |
113 | * | 113 | * |
114 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity | 114 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity |
115 | * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity | 115 | * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity |
116 | */ | 116 | */ |
117 | enum { | 117 | enum { |
118 | IRQ_SET_MASK_OK = 0, | 118 | IRQ_SET_MASK_OK = 0, |
119 | IRQ_SET_MASK_OK_NOCOPY, | 119 | IRQ_SET_MASK_OK_NOCOPY, |
120 | }; | 120 | }; |
121 | 121 | ||
122 | struct msi_desc; | 122 | struct msi_desc; |
123 | struct irq_domain; | 123 | struct irq_domain; |
124 | 124 | ||
125 | /** | 125 | /** |
126 | * struct irq_data - per irq and irq chip data passed down to chip functions | 126 | * struct irq_data - per irq and irq chip data passed down to chip functions |
127 | * @mask: precomputed bitmask for accessing the chip registers | 127 | * @mask: precomputed bitmask for accessing the chip registers |
128 | * @irq: interrupt number | 128 | * @irq: interrupt number |
129 | * @hwirq: hardware interrupt number, local to the interrupt domain | 129 | * @hwirq: hardware interrupt number, local to the interrupt domain |
130 | * @node: node index useful for balancing | 130 | * @node: node index useful for balancing |
131 | * @state_use_accessors: status information for irq chip functions. | 131 | * @state_use_accessors: status information for irq chip functions. |
132 | * Use accessor functions to deal with it | 132 | * Use accessor functions to deal with it |
133 | * @chip: low level interrupt hardware access | 133 | * @chip: low level interrupt hardware access |
134 | * @domain: Interrupt translation domain; responsible for mapping | 134 | * @domain: Interrupt translation domain; responsible for mapping |
135 | * between hwirq number and linux irq number. | 135 | * between hwirq number and linux irq number. |
136 | * @handler_data: per-IRQ data for the irq_chip methods | 136 | * @handler_data: per-IRQ data for the irq_chip methods |
137 | * @chip_data: platform-specific per-chip private data for the chip | 137 | * @chip_data: platform-specific per-chip private data for the chip |
138 | * methods, to allow shared chip implementations | 138 | * methods, to allow shared chip implementations |
139 | * @msi_desc: MSI descriptor | 139 | * @msi_desc: MSI descriptor |
140 | * @affinity: IRQ affinity on SMP | 140 | * @affinity: IRQ affinity on SMP |
141 | * | 141 | * |
142 | * The fields here need to overlay the ones in irq_desc until we | 142 | * The fields here need to overlay the ones in irq_desc until we |
143 | * cleaned up the direct references and switched everything over to | 143 | * cleaned up the direct references and switched everything over to |
144 | * irq_data. | 144 | * irq_data. |
145 | */ | 145 | */ |
146 | struct irq_data { | 146 | struct irq_data { |
147 | u32 mask; | 147 | u32 mask; |
148 | unsigned int irq; | 148 | unsigned int irq; |
149 | unsigned long hwirq; | 149 | unsigned long hwirq; |
150 | unsigned int node; | 150 | unsigned int node; |
151 | unsigned int state_use_accessors; | 151 | unsigned int state_use_accessors; |
152 | struct irq_chip *chip; | 152 | struct irq_chip *chip; |
153 | struct irq_domain *domain; | 153 | struct irq_domain *domain; |
154 | void *handler_data; | 154 | void *handler_data; |
155 | void *chip_data; | 155 | void *chip_data; |
156 | struct msi_desc *msi_desc; | 156 | struct msi_desc *msi_desc; |
157 | cpumask_var_t affinity; | 157 | cpumask_var_t affinity; |
158 | }; | 158 | }; |
159 | 159 | ||
160 | /* | 160 | /* |
161 | * Bit masks for irq_data.state | 161 | * Bit masks for irq_data.state |
162 | * | 162 | * |
163 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits | 163 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits |
164 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending | 164 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending |
165 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ | 165 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ |
166 | * IRQD_PER_CPU - Interrupt is per cpu | 166 | * IRQD_PER_CPU - Interrupt is per cpu |
167 | * IRQD_AFFINITY_SET - Interrupt affinity was set | 167 | * IRQD_AFFINITY_SET - Interrupt affinity was set |
168 | * IRQD_LEVEL - Interrupt is level triggered | 168 | * IRQD_LEVEL - Interrupt is level triggered |
169 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup | 169 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup |
170 | * from suspend | 170 | * from suspend |
171 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process | 171 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process |
172 | * context | 172 | * context |
173 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt | 173 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt |
174 | * IRQD_IRQ_MASKED - Masked state of the interrupt | 174 | * IRQD_IRQ_MASKED - Masked state of the interrupt |
175 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt | 175 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt |
176 | */ | 176 | */ |
177 | enum { | 177 | enum { |
178 | IRQD_TRIGGER_MASK = 0xf, | 178 | IRQD_TRIGGER_MASK = 0xf, |
179 | IRQD_SETAFFINITY_PENDING = (1 << 8), | 179 | IRQD_SETAFFINITY_PENDING = (1 << 8), |
180 | IRQD_NO_BALANCING = (1 << 10), | 180 | IRQD_NO_BALANCING = (1 << 10), |
181 | IRQD_PER_CPU = (1 << 11), | 181 | IRQD_PER_CPU = (1 << 11), |
182 | IRQD_AFFINITY_SET = (1 << 12), | 182 | IRQD_AFFINITY_SET = (1 << 12), |
183 | IRQD_LEVEL = (1 << 13), | 183 | IRQD_LEVEL = (1 << 13), |
184 | IRQD_WAKEUP_STATE = (1 << 14), | 184 | IRQD_WAKEUP_STATE = (1 << 14), |
185 | IRQD_MOVE_PCNTXT = (1 << 15), | 185 | IRQD_MOVE_PCNTXT = (1 << 15), |
186 | IRQD_IRQ_DISABLED = (1 << 16), | 186 | IRQD_IRQ_DISABLED = (1 << 16), |
187 | IRQD_IRQ_MASKED = (1 << 17), | 187 | IRQD_IRQ_MASKED = (1 << 17), |
188 | IRQD_IRQ_INPROGRESS = (1 << 18), | 188 | IRQD_IRQ_INPROGRESS = (1 << 18), |
189 | }; | 189 | }; |
190 | 190 | ||
/* True if an affinity update is queued to be applied later (e.g. from hard irq context) */
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
}

/* True if the interrupt is marked per cpu (protected from affinity setting) */
static inline bool irqd_is_per_cpu(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_PER_CPU;
}

/* True if the irq may be rerouted by irq balancing (neither per-cpu nor balancing-disabled) */
static inline bool irqd_can_balance(struct irq_data *d)
{
	return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

/* True if the affinity of this irq was set explicitly at least once */
static inline bool irqd_affinity_was_set(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_AFFINITY_SET;
}

/* Record that the affinity was set explicitly */
static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
	d->state_use_accessors |= IRQD_AFFINITY_SET;
}
215 | 215 | ||
/* Extract the IRQ_TYPE_* trigger bits from the irq_data state */
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_TRIGGER_MASK;
}

/*
 * Must only be called inside irq_chip.irq_set_type() functions.
 */
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
	/* Replace only the trigger bits; all other state bits are preserved */
	d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
	d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
}

/* True if the interrupt is level triggered */
static inline bool irqd_is_level_type(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_LEVEL;
}
234 | 234 | ||
235 | static inline bool irqd_is_wakeup_set(struct irq_data *d) | 235 | static inline bool irqd_is_wakeup_set(struct irq_data *d) |
236 | { | 236 | { |
237 | return d->state_use_accessors & IRQD_WAKEUP_STATE; | 237 | return d->state_use_accessors & IRQD_WAKEUP_STATE; |
238 | } | 238 | } |
239 | 239 | ||
240 | static inline bool irqd_can_move_in_process_context(struct irq_data *d) | 240 | static inline bool irqd_can_move_in_process_context(struct irq_data *d) |
241 | { | 241 | { |
242 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; | 242 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; |
243 | } | 243 | } |
244 | 244 | ||
245 | static inline bool irqd_irq_disabled(struct irq_data *d) | 245 | static inline bool irqd_irq_disabled(struct irq_data *d) |
246 | { | 246 | { |
247 | return d->state_use_accessors & IRQD_IRQ_DISABLED; | 247 | return d->state_use_accessors & IRQD_IRQ_DISABLED; |
248 | } | 248 | } |
249 | 249 | ||
250 | static inline bool irqd_irq_masked(struct irq_data *d) | 250 | static inline bool irqd_irq_masked(struct irq_data *d) |
251 | { | 251 | { |
252 | return d->state_use_accessors & IRQD_IRQ_MASKED; | 252 | return d->state_use_accessors & IRQD_IRQ_MASKED; |
253 | } | 253 | } |
254 | 254 | ||
255 | static inline bool irqd_irq_inprogress(struct irq_data *d) | 255 | static inline bool irqd_irq_inprogress(struct irq_data *d) |
256 | { | 256 | { |
257 | return d->state_use_accessors & IRQD_IRQ_INPROGRESS; | 257 | return d->state_use_accessors & IRQD_IRQ_INPROGRESS; |
258 | } | 258 | } |
259 | 259 | ||
/*
 * Functions for chained handlers which can be enabled/disabled by the
 * standard disable_irq/enable_irq calls. Must be called with
 * irq_desc->lock held.
 */
static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
{
	d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
}

static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
{
	d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
}

/* Map irq_data back to the domain local hardware irq number */
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
279 | 279 | ||
280 | /** | 280 | /** |
281 | * struct irq_chip - hardware interrupt chip descriptor | 281 | * struct irq_chip - hardware interrupt chip descriptor |
282 | * | 282 | * |
283 | * @name: name for /proc/interrupts | 283 | * @name: name for /proc/interrupts |
284 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) | 284 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) |
285 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) | 285 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) |
286 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) | 286 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) |
287 | * @irq_disable: disable the interrupt | 287 | * @irq_disable: disable the interrupt |
288 | * @irq_ack: start of a new interrupt | 288 | * @irq_ack: start of a new interrupt |
289 | * @irq_mask: mask an interrupt source | 289 | * @irq_mask: mask an interrupt source |
290 | * @irq_mask_ack: ack and mask an interrupt source | 290 | * @irq_mask_ack: ack and mask an interrupt source |
291 | * @irq_unmask: unmask an interrupt source | 291 | * @irq_unmask: unmask an interrupt source |
292 | * @irq_eoi: end of interrupt | 292 | * @irq_eoi: end of interrupt |
293 | * @irq_set_affinity: set the CPU affinity on SMP machines; @force allows routing to a CPU not yet in the online mask | 293 | * @irq_set_affinity: set the CPU affinity on SMP machines; @force allows routing to a CPU not yet in the online mask |
294 | * @irq_retrigger: resend an IRQ to the CPU | 294 | * @irq_retrigger: resend an IRQ to the CPU |
295 | * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ | 295 | * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ |
296 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ | 296 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ |
297 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips | 297 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips |
298 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips | 298 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips |
299 | * @irq_cpu_online: configure an interrupt source for a secondary CPU | 299 | * @irq_cpu_online: configure an interrupt source for a secondary CPU |
300 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU | 300 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU |
301 | * @irq_suspend: function called from core code on suspend once per chip | 301 | * @irq_suspend: function called from core code on suspend once per chip |
302 | * @irq_resume: function called from core code on resume once per chip | 302 | * @irq_resume: function called from core code on resume once per chip |
303 | * @irq_pm_shutdown: function called from core code on shutdown once per chip | 303 | * @irq_pm_shutdown: function called from core code on shutdown once per chip |
304 | * @irq_calc_mask: Optional function to set irq_data.mask for special cases | 304 | * @irq_calc_mask: Optional function to set irq_data.mask for special cases |
305 | * @irq_print_chip: optional to print special chip info in show_interrupts | 305 | * @irq_print_chip: optional to print special chip info in show_interrupts |
306 | * @irq_request_resources: optional to request resources before calling | 306 | * @irq_request_resources: optional to request resources before calling |
307 | * any other callback related to this irq | 307 | * any other callback related to this irq |
308 | * @irq_release_resources: optional to release resources acquired with | 308 | * @irq_release_resources: optional to release resources acquired with |
309 | * irq_request_resources | 309 | * irq_request_resources |
310 | * @flags: chip specific flags | 310 | * @flags: chip specific flags |
311 | */ | 311 | */ |
312 | struct irq_chip { | 312 | struct irq_chip { |
313 | const char *name; | 313 | const char *name; |
314 | unsigned int (*irq_startup)(struct irq_data *data); | 314 | unsigned int (*irq_startup)(struct irq_data *data); |
315 | void (*irq_shutdown)(struct irq_data *data); | 315 | void (*irq_shutdown)(struct irq_data *data); |
316 | void (*irq_enable)(struct irq_data *data); | 316 | void (*irq_enable)(struct irq_data *data); |
317 | void (*irq_disable)(struct irq_data *data); | 317 | void (*irq_disable)(struct irq_data *data); |
318 | 318 | ||
319 | void (*irq_ack)(struct irq_data *data); | 319 | void (*irq_ack)(struct irq_data *data); |
320 | void (*irq_mask)(struct irq_data *data); | 320 | void (*irq_mask)(struct irq_data *data); |
321 | void (*irq_mask_ack)(struct irq_data *data); | 321 | void (*irq_mask_ack)(struct irq_data *data); |
322 | void (*irq_unmask)(struct irq_data *data); | 322 | void (*irq_unmask)(struct irq_data *data); |
323 | void (*irq_eoi)(struct irq_data *data); | 323 | void (*irq_eoi)(struct irq_data *data); |
324 | 324 | ||
325 | int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); | 325 | int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); |
326 | int (*irq_retrigger)(struct irq_data *data); | 326 | int (*irq_retrigger)(struct irq_data *data); |
327 | int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); | 327 | int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); |
328 | int (*irq_set_wake)(struct irq_data *data, unsigned int on); | 328 | int (*irq_set_wake)(struct irq_data *data, unsigned int on); |
329 | 329 | ||
330 | void (*irq_bus_lock)(struct irq_data *data); | 330 | void (*irq_bus_lock)(struct irq_data *data); |
331 | void (*irq_bus_sync_unlock)(struct irq_data *data); | 331 | void (*irq_bus_sync_unlock)(struct irq_data *data); |
332 | 332 | ||
333 | void (*irq_cpu_online)(struct irq_data *data); | 333 | void (*irq_cpu_online)(struct irq_data *data); |
334 | void (*irq_cpu_offline)(struct irq_data *data); | 334 | void (*irq_cpu_offline)(struct irq_data *data); |
335 | 335 | ||
336 | void (*irq_suspend)(struct irq_data *data); | 336 | void (*irq_suspend)(struct irq_data *data); |
337 | void (*irq_resume)(struct irq_data *data); | 337 | void (*irq_resume)(struct irq_data *data); |
338 | void (*irq_pm_shutdown)(struct irq_data *data); | 338 | void (*irq_pm_shutdown)(struct irq_data *data); |
339 | 339 | ||
340 | void (*irq_calc_mask)(struct irq_data *data); | 340 | void (*irq_calc_mask)(struct irq_data *data); |
341 | 341 | ||
342 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); | 342 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); |
343 | int (*irq_request_resources)(struct irq_data *data); | 343 | int (*irq_request_resources)(struct irq_data *data); |
344 | void (*irq_release_resources)(struct irq_data *data); | 344 | void (*irq_release_resources)(struct irq_data *data); |
345 | 345 | ||
346 | unsigned long flags; | 346 | unsigned long flags; |
347 | }; | 347 | }; |
348 | 348 | ||
349 | /* | 349 | /* |
350 | * irq_chip specific flags | 350 | * irq_chip specific flags |
351 | * | 351 | * |
352 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() | 352 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() |
353 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled | 353 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled |
354 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path | 354 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path |
355 | * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks | 355 | * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks |
356 | * when irq enabled | 356 | * when irq enabled |
357 | * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip | 357 | * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip |
358 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask | 358 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask |
359 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode | 359 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode |
360 | */ | 360 | */ |
enum {
	IRQCHIP_SET_TYPE_MASKED		= (1 << 0),	/* mask before chip.irq_set_type() */
	IRQCHIP_EOI_IF_HANDLED		= (1 << 1),	/* only issue irq_eoi() when handled */
	IRQCHIP_MASK_ON_SUSPEND		= (1 << 2),	/* mask non wake irqs on suspend */
	IRQCHIP_ONOFFLINE_ENABLED	= (1 << 3),	/* on/offline callbacks only when enabled */
	IRQCHIP_SKIP_SET_WAKE		= (1 << 4),	/* skip chip.irq_set_wake() */
	IRQCHIP_ONESHOT_SAFE		= (1 << 5),	/* oneshot needs no mask/unmask */
	IRQCHIP_EOI_THREADED		= (1 << 6),	/* eoi() on unmask in threaded mode */
};
370 | 370 | ||
371 | /* This include will go away once we isolated irq_desc usage to core code */ | 371 | /* This include will go away once we isolated irq_desc usage to core code */ |
372 | #include <linux/irqdesc.h> | 372 | #include <linux/irqdesc.h> |
373 | 373 | ||
374 | /* | 374 | /* |
375 | * Pick up the arch-dependent methods: | 375 | * Pick up the arch-dependent methods: |
376 | */ | 376 | */ |
377 | #include <asm/hw_irq.h> | 377 | #include <asm/hw_irq.h> |
378 | 378 | ||
379 | #ifndef NR_IRQS_LEGACY | 379 | #ifndef NR_IRQS_LEGACY |
380 | # define NR_IRQS_LEGACY 0 | 380 | # define NR_IRQS_LEGACY 0 |
381 | #endif | 381 | #endif |
382 | 382 | ||
383 | #ifndef ARCH_IRQ_INIT_FLAGS | 383 | #ifndef ARCH_IRQ_INIT_FLAGS |
384 | # define ARCH_IRQ_INIT_FLAGS 0 | 384 | # define ARCH_IRQ_INIT_FLAGS 0 |
385 | #endif | 385 | #endif |
386 | 386 | ||
387 | #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS | 387 | #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS |
388 | 388 | ||
389 | struct irqaction; | 389 | struct irqaction; |
390 | extern int setup_irq(unsigned int irq, struct irqaction *new); | 390 | extern int setup_irq(unsigned int irq, struct irqaction *new); |
391 | extern void remove_irq(unsigned int irq, struct irqaction *act); | 391 | extern void remove_irq(unsigned int irq, struct irqaction *act); |
392 | extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); | 392 | extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); |
393 | extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); | 393 | extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); |
394 | 394 | ||
395 | extern void irq_cpu_online(void); | 395 | extern void irq_cpu_online(void); |
396 | extern void irq_cpu_offline(void); | 396 | extern void irq_cpu_offline(void); |
397 | extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask); | 397 | extern int irq_set_affinity_locked(struct irq_data *data, |
398 | const struct cpumask *cpumask, bool force); | ||
398 | 399 | ||
399 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) | 400 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
400 | void irq_move_irq(struct irq_data *data); | 401 | void irq_move_irq(struct irq_data *data); |
401 | void irq_move_masked_irq(struct irq_data *data); | 402 | void irq_move_masked_irq(struct irq_data *data); |
402 | #else | 403 | #else |
403 | static inline void irq_move_irq(struct irq_data *data) { } | 404 | static inline void irq_move_irq(struct irq_data *data) { } |
404 | static inline void irq_move_masked_irq(struct irq_data *data) { } | 405 | static inline void irq_move_masked_irq(struct irq_data *data) { } |
405 | #endif | 406 | #endif |
406 | 407 | ||
407 | extern int no_irq_affinity; | 408 | extern int no_irq_affinity; |
408 | 409 | ||
409 | #ifdef CONFIG_HARDIRQS_SW_RESEND | 410 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
410 | int irq_set_parent(int irq, int parent_irq); | 411 | int irq_set_parent(int irq, int parent_irq); |
411 | #else | 412 | #else |
412 | static inline int irq_set_parent(int irq, int parent_irq) | 413 | static inline int irq_set_parent(int irq, int parent_irq) |
413 | { | 414 | { |
414 | return 0; | 415 | return 0; |
415 | } | 416 | } |
416 | #endif | 417 | #endif |
417 | 418 | ||
418 | /* | 419 | /* |
419 | * Built-in IRQ handlers for various IRQ types, | 420 | * Built-in IRQ handlers for various IRQ types, |
420 | * callable via desc->handle_irq() | 421 | * callable via desc->handle_irq() |
421 | */ | 422 | */ |
422 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); | 423 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); |
423 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); | 424 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); |
424 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); | 425 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); |
425 | extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); | 426 | extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); |
426 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); | 427 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); |
427 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); | 428 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); |
428 | extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); | 429 | extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); |
429 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | 430 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); |
430 | extern void handle_nested_irq(unsigned int irq); | 431 | extern void handle_nested_irq(unsigned int irq); |
431 | 432 | ||
432 | /* Handling of unhandled and spurious interrupts: */ | 433 | /* Handling of unhandled and spurious interrupts: */ |
433 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, | 434 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, |
434 | irqreturn_t action_ret); | 435 | irqreturn_t action_ret); |
435 | 436 | ||
436 | 437 | ||
437 | /* Enable/disable irq debugging output: */ | 438 | /* Enable/disable irq debugging output: */ |
438 | extern int noirqdebug_setup(char *str); | 439 | extern int noirqdebug_setup(char *str); |
439 | 440 | ||
440 | /* Checks whether the interrupt can be requested by request_irq(): */ | 441 | /* Checks whether the interrupt can be requested by request_irq(): */ |
441 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); | 442 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); |
442 | 443 | ||
443 | /* Dummy irq-chip implementations: */ | 444 | /* Dummy irq-chip implementations: */ |
444 | extern struct irq_chip no_irq_chip; | 445 | extern struct irq_chip no_irq_chip; |
445 | extern struct irq_chip dummy_irq_chip; | 446 | extern struct irq_chip dummy_irq_chip; |
446 | 447 | ||
447 | extern void | 448 | extern void |
448 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | 449 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
449 | irq_flow_handler_t handle, const char *name); | 450 | irq_flow_handler_t handle, const char *name); |
450 | 451 | ||
/*
 * irq_set_chip_and_handler - Set the chip and the flow handler for an irq
 * @irq:	Interrupt number
 * @chip:	Pointer to the irq chip
 * @handle:	Flow handler function
 *
 * Convenience wrapper around irq_set_chip_and_handler_name() which
 * passes NULL for the handler name.
 */
static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
					    irq_flow_handler_t handle)
{
	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}
456 | 457 | ||
457 | extern int irq_set_percpu_devid(unsigned int irq); | 458 | extern int irq_set_percpu_devid(unsigned int irq); |
458 | 459 | ||
459 | extern void | 460 | extern void |
460 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 461 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
461 | const char *name); | 462 | const char *name); |
462 | 463 | ||
/*
 * irq_set_handler - Set the highlevel flow handler for an irq
 * @irq:	Interrupt number
 * @handle:	Flow handler function
 *
 * Wrapper around __irq_set_handler() with is_chained = 0 and no name.
 */
static inline void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 0, NULL);
}
468 | 469 | ||
/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
 *
 * Same as irq_set_handler(), except __irq_set_handler() is called
 * with is_chained = 1.
 */
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 1, NULL);
}
479 | 480 | ||
480 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); | 481 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); |
481 | 482 | ||
/* Set the given IRQ_* status flags on @irq; clears nothing. */
static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
	irq_modify_status(irq, 0, set);
}

/* Clear the given IRQ_* status flags on @irq; sets nothing. */
static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
	irq_modify_status(irq, clr, 0);
}
491 | 492 | ||
492 | static inline void irq_set_noprobe(unsigned int irq) | 493 | static inline void irq_set_noprobe(unsigned int irq) |
493 | { | 494 | { |
494 | irq_modify_status(irq, 0, IRQ_NOPROBE); | 495 | irq_modify_status(irq, 0, IRQ_NOPROBE); |
495 | } | 496 | } |
496 | 497 | ||
497 | static inline void irq_set_probe(unsigned int irq) | 498 | static inline void irq_set_probe(unsigned int irq) |
498 | { | 499 | { |
499 | irq_modify_status(irq, IRQ_NOPROBE, 0); | 500 | irq_modify_status(irq, IRQ_NOPROBE, 0); |
500 | } | 501 | } |
501 | 502 | ||
/* Set the IRQ_NOTHREAD status flag for @irq. */
static inline void irq_set_nothread(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOTHREAD);
}

/* Clear the IRQ_NOTHREAD status flag for @irq. */
static inline void irq_set_thread(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOTHREAD, 0);
}
511 | 512 | ||
512 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) | 513 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) |
513 | { | 514 | { |
514 | if (nest) | 515 | if (nest) |
515 | irq_set_status_flags(irq, IRQ_NESTED_THREAD); | 516 | irq_set_status_flags(irq, IRQ_NESTED_THREAD); |
516 | else | 517 | else |
517 | irq_clear_status_flags(irq, IRQ_NESTED_THREAD); | 518 | irq_clear_status_flags(irq, IRQ_NESTED_THREAD); |
518 | } | 519 | } |
519 | 520 | ||
/*
 * Apply the status flags used for a per-cpu devid interrupt in one go:
 * IRQ_NOAUTOEN, IRQ_PER_CPU, IRQ_NOTHREAD, IRQ_NOPROBE and
 * IRQ_PER_CPU_DEVID.
 */
static inline void irq_set_percpu_devid_flags(unsigned int irq)
{
	irq_set_status_flags(irq,
			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
}
526 | 527 | ||
527 | /* Handle dynamic irq creation and destruction */ | 528 | /* Handle dynamic irq creation and destruction */ |
528 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); | 529 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); |
529 | extern unsigned int __create_irqs(unsigned int from, unsigned int count, | 530 | extern unsigned int __create_irqs(unsigned int from, unsigned int count, |
530 | int node); | 531 | int node); |
531 | extern int create_irq(void); | 532 | extern int create_irq(void); |
532 | extern void destroy_irq(unsigned int irq); | 533 | extern void destroy_irq(unsigned int irq); |
533 | extern void destroy_irqs(unsigned int irq, unsigned int count); | 534 | extern void destroy_irqs(unsigned int irq, unsigned int count); |
534 | 535 | ||
535 | /* | 536 | /* |
536 | * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and | 537 | * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and |
537 | * irq_free_desc instead. | 538 | * irq_free_desc instead. |
538 | */ | 539 | */ |
539 | extern void dynamic_irq_cleanup(unsigned int irq); | 540 | extern void dynamic_irq_cleanup(unsigned int irq); |
/*
 * Obsolete: use irq_alloc_desc* / irq_free_desc instead.
 * Initialization is implemented as running the cleanup path.
 */
static inline void dynamic_irq_init(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}
544 | 545 | ||
545 | /* Set/get chip/data for an IRQ: */ | 546 | /* Set/get chip/data for an IRQ: */ |
546 | extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); | 547 | extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); |
547 | extern int irq_set_handler_data(unsigned int irq, void *data); | 548 | extern int irq_set_handler_data(unsigned int irq, void *data); |
548 | extern int irq_set_chip_data(unsigned int irq, void *data); | 549 | extern int irq_set_chip_data(unsigned int irq, void *data); |
549 | extern int irq_set_irq_type(unsigned int irq, unsigned int type); | 550 | extern int irq_set_irq_type(unsigned int irq, unsigned int type); |
550 | extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); | 551 | extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); |
551 | extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, | 552 | extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, |
552 | struct msi_desc *entry); | 553 | struct msi_desc *entry); |
553 | extern struct irq_data *irq_get_irq_data(unsigned int irq); | 554 | extern struct irq_data *irq_get_irq_data(unsigned int irq); |
554 | 555 | ||
555 | static inline struct irq_chip *irq_get_chip(unsigned int irq) | 556 | static inline struct irq_chip *irq_get_chip(unsigned int irq) |
556 | { | 557 | { |
557 | struct irq_data *d = irq_get_irq_data(irq); | 558 | struct irq_data *d = irq_get_irq_data(irq); |
558 | return d ? d->chip : NULL; | 559 | return d ? d->chip : NULL; |
559 | } | 560 | } |
560 | 561 | ||
561 | static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) | 562 | static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) |
562 | { | 563 | { |
563 | return d->chip; | 564 | return d->chip; |
564 | } | 565 | } |
565 | 566 | ||
/* Chip private data for @irq; NULL when no irq_data exists. */
static inline void *irq_get_chip_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip_data : NULL;
}

/* Accessor for the chip_data member of an irq_data. */
static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
{
	return d->chip_data;
}
576 | 577 | ||
577 | static inline void *irq_get_handler_data(unsigned int irq) | 578 | static inline void *irq_get_handler_data(unsigned int irq) |
578 | { | 579 | { |
579 | struct irq_data *d = irq_get_irq_data(irq); | 580 | struct irq_data *d = irq_get_irq_data(irq); |
580 | return d ? d->handler_data : NULL; | 581 | return d ? d->handler_data : NULL; |
581 | } | 582 | } |
582 | 583 | ||
583 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) | 584 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) |
584 | { | 585 | { |
585 | return d->handler_data; | 586 | return d->handler_data; |
586 | } | 587 | } |
587 | 588 | ||
/* MSI descriptor for @irq; NULL when no irq_data exists. */
static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->msi_desc : NULL;
}

/* Accessor for the msi_desc member of an irq_data. */
static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
{
	return d->msi_desc;
}
598 | 599 | ||
599 | static inline u32 irq_get_trigger_type(unsigned int irq) | 600 | static inline u32 irq_get_trigger_type(unsigned int irq) |
600 | { | 601 | { |
601 | struct irq_data *d = irq_get_irq_data(irq); | 602 | struct irq_data *d = irq_get_irq_data(irq); |
602 | return d ? irqd_get_trigger_type(d) : 0; | 603 | return d ? irqd_get_trigger_type(d) : 0; |
603 | } | 604 | } |
604 | 605 | ||
605 | int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, | 606 | int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, |
606 | struct module *owner); | 607 | struct module *owner); |
607 | 608 | ||
608 | /* use macros to avoid needing export.h for THIS_MODULE */ | 609 | /* use macros to avoid needing export.h for THIS_MODULE */ |
609 | #define irq_alloc_descs(irq, from, cnt, node) \ | 610 | #define irq_alloc_descs(irq, from, cnt, node) \ |
610 | __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE) | 611 | __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE) |
611 | 612 | ||
612 | #define irq_alloc_desc(node) \ | 613 | #define irq_alloc_desc(node) \ |
613 | irq_alloc_descs(-1, 0, 1, node) | 614 | irq_alloc_descs(-1, 0, 1, node) |
614 | 615 | ||
615 | #define irq_alloc_desc_at(at, node) \ | 616 | #define irq_alloc_desc_at(at, node) \ |
616 | irq_alloc_descs(at, at, 1, node) | 617 | irq_alloc_descs(at, at, 1, node) |
617 | 618 | ||
618 | #define irq_alloc_desc_from(from, node) \ | 619 | #define irq_alloc_desc_from(from, node) \ |
619 | irq_alloc_descs(-1, from, 1, node) | 620 | irq_alloc_descs(-1, from, 1, node) |
620 | 621 | ||
621 | #define irq_alloc_descs_from(from, cnt, node) \ | 622 | #define irq_alloc_descs_from(from, cnt, node) \ |
622 | irq_alloc_descs(-1, from, cnt, node) | 623 | irq_alloc_descs(-1, from, cnt, node) |
623 | 624 | ||
624 | void irq_free_descs(unsigned int irq, unsigned int cnt); | 625 | void irq_free_descs(unsigned int irq, unsigned int cnt); |
625 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | 626 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); |
626 | 627 | ||
/* Free a single irq descriptor. */
static inline void irq_free_desc(unsigned int irq)
{
	irq_free_descs(irq, 1);
}

/* Reserve a single irq number. Returns the result of irq_reserve_irqs(). */
static inline int irq_reserve_irq(unsigned int irq)
{
	return irq_reserve_irqs(irq, 1);
}
636 | 637 | ||
637 | #ifndef irq_reg_writel | 638 | #ifndef irq_reg_writel |
638 | # define irq_reg_writel(val, addr) writel(val, addr) | 639 | # define irq_reg_writel(val, addr) writel(val, addr) |
639 | #endif | 640 | #endif |
640 | #ifndef irq_reg_readl | 641 | #ifndef irq_reg_readl |
641 | # define irq_reg_readl(addr) readl(addr) | 642 | # define irq_reg_readl(addr) readl(addr) |
642 | #endif | 643 | #endif |
643 | 644 | ||
/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
 * @enable:	Enable register offset to reg_base
 * @disable:	Disable register offset to reg_base
 * @mask:	Mask register offset to reg_base
 * @ack:	Ack register offset to reg_base
 * @eoi:	Eoi register offset to reg_base
 * @type:	Type configuration register offset to reg_base
 * @polarity:	Polarity configuration register offset to reg_base
 *
 * All offsets are relative to irq_chip_generic::reg_base.
 */
struct irq_chip_regs {
	unsigned long		enable;
	unsigned long		disable;
	unsigned long		mask;
	unsigned long		ack;
	unsigned long		eoi;
	unsigned long		type;
	unsigned long		polarity;
};
663 | 664 | ||
/**
 * struct irq_chip_type - Generic interrupt chip instance for a flow type
 * @chip:		The real interrupt chip which provides the callbacks
 * @regs:		Register offsets for this chip
 * @handler:		Flow handler associated with this chip
 * @type:		Chip can handle these flow types
 * @mask_cache_priv:	Cached mask register private to the chip type
 * @mask_cache:		Pointer to cached mask register
 *
 * An irq_chip_generic can have several instances of irq_chip_type when
 * it requires different functions and register offsets for different
 * flow types.
 */
struct irq_chip_type {
	struct irq_chip		chip;
	struct irq_chip_regs	regs;
	irq_flow_handler_t	handler;
	u32			type;
	u32			mask_cache_priv;
	u32			*mask_cache;
};
685 | 686 | ||
686 | /** | 687 | /** |
687 | * struct irq_chip_generic - Generic irq chip data structure | 688 | * struct irq_chip_generic - Generic irq chip data structure |
688 | * @lock: Lock to protect register and cache data access | 689 | * @lock: Lock to protect register and cache data access |
689 | * @reg_base: Register base address (virtual) | 690 | * @reg_base: Register base address (virtual) |
690 | * @irq_base: Interrupt base nr for this chip | 691 | * @irq_base: Interrupt base nr for this chip |
691 | * @irq_cnt: Number of interrupts handled by this chip | 692 | * @irq_cnt: Number of interrupts handled by this chip |
692 | * @mask_cache: Cached mask register shared between all chip types | 693 | * @mask_cache: Cached mask register shared between all chip types |
693 | * @type_cache: Cached type register | 694 | * @type_cache: Cached type register |
694 | * @polarity_cache: Cached polarity register | 695 | * @polarity_cache: Cached polarity register |
695 | * @wake_enabled: Interrupt can wakeup from suspend | 696 | * @wake_enabled: Interrupt can wakeup from suspend |
696 | * @wake_active: Interrupt is marked as an wakeup from suspend source | 697 | * @wake_active: Interrupt is marked as an wakeup from suspend source |
697 | * @num_ct: Number of available irq_chip_type instances (usually 1) | 698 | * @num_ct: Number of available irq_chip_type instances (usually 1) |
698 | * @private: Private data for non generic chip callbacks | 699 | * @private: Private data for non generic chip callbacks |
699 | * @installed: bitfield to denote installed interrupts | 700 | * @installed: bitfield to denote installed interrupts |
700 | * @unused: bitfield to denote unused interrupts | 701 | * @unused: bitfield to denote unused interrupts |
701 | * @domain: irq domain pointer | 702 | * @domain: irq domain pointer |
702 | * @list: List head for keeping track of instances | 703 | * @list: List head for keeping track of instances |
703 | * @chip_types: Array of interrupt irq_chip_types | 704 | * @chip_types: Array of interrupt irq_chip_types |
704 | * | 705 | * |
705 | * Note, that irq_chip_generic can have multiple irq_chip_type | 706 | * Note, that irq_chip_generic can have multiple irq_chip_type |
706 | * implementations which can be associated to a particular irq line of | 707 | * implementations which can be associated to a particular irq line of |
707 | * an irq_chip_generic instance. That allows to share and protect | 708 | * an irq_chip_generic instance. That allows to share and protect |
708 | * state in an irq_chip_generic instance when we need to implement | 709 | * state in an irq_chip_generic instance when we need to implement |
709 | * different flow mechanisms (level/edge) for it. | 710 | * different flow mechanisms (level/edge) for it. |
710 | */ | 711 | */ |
711 | struct irq_chip_generic { | 712 | struct irq_chip_generic { |
712 | raw_spinlock_t lock; | 713 | raw_spinlock_t lock; |
713 | void __iomem *reg_base; | 714 | void __iomem *reg_base; |
714 | unsigned int irq_base; | 715 | unsigned int irq_base; |
715 | unsigned int irq_cnt; | 716 | unsigned int irq_cnt; |
716 | u32 mask_cache; | 717 | u32 mask_cache; |
717 | u32 type_cache; | 718 | u32 type_cache; |
718 | u32 polarity_cache; | 719 | u32 polarity_cache; |
719 | u32 wake_enabled; | 720 | u32 wake_enabled; |
720 | u32 wake_active; | 721 | u32 wake_active; |
721 | unsigned int num_ct; | 722 | unsigned int num_ct; |
722 | void *private; | 723 | void *private; |
723 | unsigned long installed; | 724 | unsigned long installed; |
724 | unsigned long unused; | 725 | unsigned long unused; |
725 | struct irq_domain *domain; | 726 | struct irq_domain *domain; |
726 | struct list_head list; | 727 | struct list_head list; |
727 | struct irq_chip_type chip_types[0]; | 728 | struct irq_chip_type chip_types[0]; |
728 | }; | 729 | }; |
729 | 730 | ||
/**
 * enum irq_gc_flags - Initialization flags for generic irq chips
 * @IRQ_GC_INIT_MASK_CACHE:	Initialize the mask_cache by reading mask reg
 * @IRQ_GC_INIT_NESTED_LOCK:	Set the lock class of the irqs to nested for
 *				irq chips which need to call irq_set_wake() on
 *				the parent irq. Usually GPIO implementations
 * @IRQ_GC_MASK_CACHE_PER_TYPE:	Mask cache is chip type private
 * @IRQ_GC_NO_MASK:		Do not calculate irq_data->mask
 *
 * These flags are handed to irq_setup_generic_chip() and
 * irq_alloc_domain_generic_chips() below.
 */
enum irq_gc_flags {
	IRQ_GC_INIT_MASK_CACHE		= 1 << 0,
	IRQ_GC_INIT_NESTED_LOCK		= 1 << 1,
	IRQ_GC_MASK_CACHE_PER_TYPE	= 1 << 2,
	IRQ_GC_NO_MASK			= 1 << 3,
};
745 | 746 | ||
746 | /* | 747 | /* |
747 | * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains | 748 | * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains |
748 | * @irqs_per_chip: Number of interrupts per chip | 749 | * @irqs_per_chip: Number of interrupts per chip |
749 | * @num_chips: Number of chips | 750 | * @num_chips: Number of chips |
750 | * @irq_flags_to_set: IRQ* flags to set on irq setup | 751 | * @irq_flags_to_set: IRQ* flags to set on irq setup |
751 | * @irq_flags_to_clear: IRQ* flags to clear on irq setup | 752 | * @irq_flags_to_clear: IRQ* flags to clear on irq setup |
752 | * @gc_flags: Generic chip specific setup flags | 753 | * @gc_flags: Generic chip specific setup flags |
753 | * @gc: Array of pointers to generic interrupt chips | 754 | * @gc: Array of pointers to generic interrupt chips |
754 | */ | 755 | */ |
755 | struct irq_domain_chip_generic { | 756 | struct irq_domain_chip_generic { |
756 | unsigned int irqs_per_chip; | 757 | unsigned int irqs_per_chip; |
757 | unsigned int num_chips; | 758 | unsigned int num_chips; |
758 | unsigned int irq_flags_to_clear; | 759 | unsigned int irq_flags_to_clear; |
759 | unsigned int irq_flags_to_set; | 760 | unsigned int irq_flags_to_set; |
760 | enum irq_gc_flags gc_flags; | 761 | enum irq_gc_flags gc_flags; |
761 | struct irq_chip_generic *gc[0]; | 762 | struct irq_chip_generic *gc[0]; |
762 | }; | 763 | }; |
763 | 764 | ||
764 | /* Generic chip callback functions */ | 765 | /* Generic chip callback functions */ |
765 | void irq_gc_noop(struct irq_data *d); | 766 | void irq_gc_noop(struct irq_data *d); |
766 | void irq_gc_mask_disable_reg(struct irq_data *d); | 767 | void irq_gc_mask_disable_reg(struct irq_data *d); |
767 | void irq_gc_mask_set_bit(struct irq_data *d); | 768 | void irq_gc_mask_set_bit(struct irq_data *d); |
768 | void irq_gc_mask_clr_bit(struct irq_data *d); | 769 | void irq_gc_mask_clr_bit(struct irq_data *d); |
769 | void irq_gc_unmask_enable_reg(struct irq_data *d); | 770 | void irq_gc_unmask_enable_reg(struct irq_data *d); |
770 | void irq_gc_ack_set_bit(struct irq_data *d); | 771 | void irq_gc_ack_set_bit(struct irq_data *d); |
771 | void irq_gc_ack_clr_bit(struct irq_data *d); | 772 | void irq_gc_ack_clr_bit(struct irq_data *d); |
772 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); | 773 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); |
773 | void irq_gc_eoi(struct irq_data *d); | 774 | void irq_gc_eoi(struct irq_data *d); |
774 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); | 775 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); |
775 | 776 | ||
776 | /* Setup functions for irq_chip_generic */ | 777 | /* Setup functions for irq_chip_generic */ |
777 | struct irq_chip_generic * | 778 | struct irq_chip_generic * |
778 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, | 779 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, |
779 | void __iomem *reg_base, irq_flow_handler_t handler); | 780 | void __iomem *reg_base, irq_flow_handler_t handler); |
780 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | 781 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, |
781 | enum irq_gc_flags flags, unsigned int clr, | 782 | enum irq_gc_flags flags, unsigned int clr, |
782 | unsigned int set); | 783 | unsigned int set); |
783 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); | 784 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); |
784 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | 785 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, |
785 | unsigned int clr, unsigned int set); | 786 | unsigned int clr, unsigned int set); |
786 | 787 | ||
787 | struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); | 788 | struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); |
788 | int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, | 789 | int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, |
789 | int num_ct, const char *name, | 790 | int num_ct, const char *name, |
790 | irq_flow_handler_t handler, | 791 | irq_flow_handler_t handler, |
791 | unsigned int clr, unsigned int set, | 792 | unsigned int clr, unsigned int set, |
792 | enum irq_gc_flags flags); | 793 | enum irq_gc_flags flags); |
793 | 794 | ||
794 | 795 | ||
795 | static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) | 796 | static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) |
796 | { | 797 | { |
797 | return container_of(d->chip, struct irq_chip_type, chip); | 798 | return container_of(d->chip, struct irq_chip_type, chip); |
798 | } | 799 | } |
799 | 800 | ||
800 | #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) | 801 | #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) |
801 | 802 | ||
802 | #ifdef CONFIG_SMP | 803 | #ifdef CONFIG_SMP |
803 | static inline void irq_gc_lock(struct irq_chip_generic *gc) | 804 | static inline void irq_gc_lock(struct irq_chip_generic *gc) |
804 | { | 805 | { |
805 | raw_spin_lock(&gc->lock); | 806 | raw_spin_lock(&gc->lock); |
806 | } | 807 | } |
807 | 808 | ||
808 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) | 809 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) |
809 | { | 810 | { |
810 | raw_spin_unlock(&gc->lock); | 811 | raw_spin_unlock(&gc->lock); |
811 | } | 812 | } |
812 | #else | 813 | #else |
813 | static inline void irq_gc_lock(struct irq_chip_generic *gc) { } | 814 | static inline void irq_gc_lock(struct irq_chip_generic *gc) { } |
814 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } | 815 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } |
815 | #endif | 816 | #endif |
816 | 817 | ||
817 | #endif /* _LINUX_IRQ_H */ | 818 | #endif /* _LINUX_IRQ_H */ |
818 | 819 |
1 | /* | 1 | /* |
2 | * linux/kernel/irq/manage.c | 2 | * linux/kernel/irq/manage.c |
3 | * | 3 | * |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | 4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
5 | * Copyright (C) 2005-2006 Thomas Gleixner | 5 | * Copyright (C) 2005-2006 Thomas Gleixner |
6 | * | 6 | * |
7 | * This file contains driver APIs to the irq subsystem. | 7 | * This file contains driver APIs to the irq subsystem. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define pr_fmt(fmt) "genirq: " fmt | 10 | #define pr_fmt(fmt) "genirq: " fmt |
11 | 11 | ||
12 | #include <linux/irq.h> | 12 | #include <linux/irq.h> |
13 | #include <linux/kthread.h> | 13 | #include <linux/kthread.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/sched/rt.h> | 19 | #include <linux/sched/rt.h> |
20 | #include <linux/task_work.h> | 20 | #include <linux/task_work.h> |
21 | 21 | ||
22 | #include "internals.h" | 22 | #include "internals.h" |
23 | 23 | ||
24 | #ifdef CONFIG_IRQ_FORCED_THREADING | 24 | #ifdef CONFIG_IRQ_FORCED_THREADING |
25 | __read_mostly bool force_irqthreads; | 25 | __read_mostly bool force_irqthreads; |
26 | 26 | ||
27 | static int __init setup_forced_irqthreads(char *arg) | 27 | static int __init setup_forced_irqthreads(char *arg) |
28 | { | 28 | { |
29 | force_irqthreads = true; | 29 | force_irqthreads = true; |
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
32 | early_param("threadirqs", setup_forced_irqthreads); | 32 | early_param("threadirqs", setup_forced_irqthreads); |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | static void __synchronize_hardirq(struct irq_desc *desc) | 35 | static void __synchronize_hardirq(struct irq_desc *desc) |
36 | { | 36 | { |
37 | bool inprogress; | 37 | bool inprogress; |
38 | 38 | ||
39 | do { | 39 | do { |
40 | unsigned long flags; | 40 | unsigned long flags; |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Wait until we're out of the critical section. This might | 43 | * Wait until we're out of the critical section. This might |
44 | * give the wrong answer due to the lack of memory barriers. | 44 | * give the wrong answer due to the lack of memory barriers. |
45 | */ | 45 | */ |
46 | while (irqd_irq_inprogress(&desc->irq_data)) | 46 | while (irqd_irq_inprogress(&desc->irq_data)) |
47 | cpu_relax(); | 47 | cpu_relax(); |
48 | 48 | ||
49 | /* Ok, that indicated we're done: double-check carefully. */ | 49 | /* Ok, that indicated we're done: double-check carefully. */ |
50 | raw_spin_lock_irqsave(&desc->lock, flags); | 50 | raw_spin_lock_irqsave(&desc->lock, flags); |
51 | inprogress = irqd_irq_inprogress(&desc->irq_data); | 51 | inprogress = irqd_irq_inprogress(&desc->irq_data); |
52 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 52 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
53 | 53 | ||
54 | /* Oops, that failed? */ | 54 | /* Oops, that failed? */ |
55 | } while (inprogress); | 55 | } while (inprogress); |
56 | } | 56 | } |
57 | 57 | ||
58 | /** | 58 | /** |
59 | * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) | 59 | * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) |
60 | * @irq: interrupt number to wait for | 60 | * @irq: interrupt number to wait for |
61 | * | 61 | * |
62 | * This function waits for any pending hard IRQ handlers for this | 62 | * This function waits for any pending hard IRQ handlers for this |
63 | * interrupt to complete before returning. If you use this | 63 | * interrupt to complete before returning. If you use this |
64 | * function while holding a resource the IRQ handler may need you | 64 | * function while holding a resource the IRQ handler may need you |
65 | * will deadlock. It does not take associated threaded handlers | 65 | * will deadlock. It does not take associated threaded handlers |
66 | * into account. | 66 | * into account. |
67 | * | 67 | * |
68 | * Do not use this for shutdown scenarios where you must be sure | 68 | * Do not use this for shutdown scenarios where you must be sure |
69 | * that all parts (hardirq and threaded handler) have completed. | 69 | * that all parts (hardirq and threaded handler) have completed. |
70 | * | 70 | * |
71 | * This function may be called - with care - from IRQ context. | 71 | * This function may be called - with care - from IRQ context. |
72 | */ | 72 | */ |
73 | void synchronize_hardirq(unsigned int irq) | 73 | void synchronize_hardirq(unsigned int irq) |
74 | { | 74 | { |
75 | struct irq_desc *desc = irq_to_desc(irq); | 75 | struct irq_desc *desc = irq_to_desc(irq); |
76 | 76 | ||
77 | if (desc) | 77 | if (desc) |
78 | __synchronize_hardirq(desc); | 78 | __synchronize_hardirq(desc); |
79 | } | 79 | } |
80 | EXPORT_SYMBOL(synchronize_hardirq); | 80 | EXPORT_SYMBOL(synchronize_hardirq); |
81 | 81 | ||
82 | /** | 82 | /** |
83 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 83 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
84 | * @irq: interrupt number to wait for | 84 | * @irq: interrupt number to wait for |
85 | * | 85 | * |
86 | * This function waits for any pending IRQ handlers for this interrupt | 86 | * This function waits for any pending IRQ handlers for this interrupt |
87 | * to complete before returning. If you use this function while | 87 | * to complete before returning. If you use this function while |
88 | * holding a resource the IRQ handler may need you will deadlock. | 88 | * holding a resource the IRQ handler may need you will deadlock. |
89 | * | 89 | * |
90 | * This function may be called - with care - from IRQ context. | 90 | * This function may be called - with care - from IRQ context. |
91 | */ | 91 | */ |
92 | void synchronize_irq(unsigned int irq) | 92 | void synchronize_irq(unsigned int irq) |
93 | { | 93 | { |
94 | struct irq_desc *desc = irq_to_desc(irq); | 94 | struct irq_desc *desc = irq_to_desc(irq); |
95 | 95 | ||
96 | if (desc) { | 96 | if (desc) { |
97 | __synchronize_hardirq(desc); | 97 | __synchronize_hardirq(desc); |
98 | /* | 98 | /* |
99 | * We made sure that no hardirq handler is | 99 | * We made sure that no hardirq handler is |
100 | * running. Now verify that no threaded handlers are | 100 | * running. Now verify that no threaded handlers are |
101 | * active. | 101 | * active. |
102 | */ | 102 | */ |
103 | wait_event(desc->wait_for_threads, | 103 | wait_event(desc->wait_for_threads, |
104 | !atomic_read(&desc->threads_active)); | 104 | !atomic_read(&desc->threads_active)); |
105 | } | 105 | } |
106 | } | 106 | } |
107 | EXPORT_SYMBOL(synchronize_irq); | 107 | EXPORT_SYMBOL(synchronize_irq); |
108 | 108 | ||
109 | #ifdef CONFIG_SMP | 109 | #ifdef CONFIG_SMP |
110 | cpumask_var_t irq_default_affinity; | 110 | cpumask_var_t irq_default_affinity; |
111 | 111 | ||
112 | /** | 112 | /** |
113 | * irq_can_set_affinity - Check if the affinity of a given irq can be set | 113 | * irq_can_set_affinity - Check if the affinity of a given irq can be set |
114 | * @irq: Interrupt to check | 114 | * @irq: Interrupt to check |
115 | * | 115 | * |
116 | */ | 116 | */ |
117 | int irq_can_set_affinity(unsigned int irq) | 117 | int irq_can_set_affinity(unsigned int irq) |
118 | { | 118 | { |
119 | struct irq_desc *desc = irq_to_desc(irq); | 119 | struct irq_desc *desc = irq_to_desc(irq); |
120 | 120 | ||
121 | if (!desc || !irqd_can_balance(&desc->irq_data) || | 121 | if (!desc || !irqd_can_balance(&desc->irq_data) || |
122 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) | 122 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) |
123 | return 0; | 123 | return 0; |
124 | 124 | ||
125 | return 1; | 125 | return 1; |
126 | } | 126 | } |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * irq_set_thread_affinity - Notify irq threads to adjust affinity | 129 | * irq_set_thread_affinity - Notify irq threads to adjust affinity |
130 | * @desc: irq descriptor which has affitnity changed | 130 | * @desc: irq descriptor which has affitnity changed |
131 | * | 131 | * |
132 | * We just set IRQTF_AFFINITY and delegate the affinity setting | 132 | * We just set IRQTF_AFFINITY and delegate the affinity setting |
133 | * to the interrupt thread itself. We can not call | 133 | * to the interrupt thread itself. We can not call |
134 | * set_cpus_allowed_ptr() here as we hold desc->lock and this | 134 | * set_cpus_allowed_ptr() here as we hold desc->lock and this |
135 | * code can be called from hard interrupt context. | 135 | * code can be called from hard interrupt context. |
136 | */ | 136 | */ |
137 | void irq_set_thread_affinity(struct irq_desc *desc) | 137 | void irq_set_thread_affinity(struct irq_desc *desc) |
138 | { | 138 | { |
139 | struct irqaction *action = desc->action; | 139 | struct irqaction *action = desc->action; |
140 | 140 | ||
141 | while (action) { | 141 | while (action) { |
142 | if (action->thread) | 142 | if (action->thread) |
143 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | 143 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
144 | action = action->next; | 144 | action = action->next; |
145 | } | 145 | } |
146 | } | 146 | } |
147 | 147 | ||
148 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 148 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
149 | static inline bool irq_can_move_pcntxt(struct irq_data *data) | 149 | static inline bool irq_can_move_pcntxt(struct irq_data *data) |
150 | { | 150 | { |
151 | return irqd_can_move_in_process_context(data); | 151 | return irqd_can_move_in_process_context(data); |
152 | } | 152 | } |
153 | static inline bool irq_move_pending(struct irq_data *data) | 153 | static inline bool irq_move_pending(struct irq_data *data) |
154 | { | 154 | { |
155 | return irqd_is_setaffinity_pending(data); | 155 | return irqd_is_setaffinity_pending(data); |
156 | } | 156 | } |
157 | static inline void | 157 | static inline void |
158 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | 158 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) |
159 | { | 159 | { |
160 | cpumask_copy(desc->pending_mask, mask); | 160 | cpumask_copy(desc->pending_mask, mask); |
161 | } | 161 | } |
162 | static inline void | 162 | static inline void |
163 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | 163 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) |
164 | { | 164 | { |
165 | cpumask_copy(mask, desc->pending_mask); | 165 | cpumask_copy(mask, desc->pending_mask); |
166 | } | 166 | } |
167 | #else | 167 | #else |
168 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } | 168 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } |
169 | static inline bool irq_move_pending(struct irq_data *data) { return false; } | 169 | static inline bool irq_move_pending(struct irq_data *data) { return false; } |
170 | static inline void | 170 | static inline void |
171 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | 171 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } |
172 | static inline void | 172 | static inline void |
173 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | 173 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } |
174 | #endif | 174 | #endif |
175 | 175 | ||
176 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | 176 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, |
177 | bool force) | 177 | bool force) |
178 | { | 178 | { |
179 | struct irq_desc *desc = irq_data_to_desc(data); | 179 | struct irq_desc *desc = irq_data_to_desc(data); |
180 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 180 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
181 | int ret; | 181 | int ret; |
182 | 182 | ||
183 | ret = chip->irq_set_affinity(data, mask, false); | 183 | ret = chip->irq_set_affinity(data, mask, force); |
184 | switch (ret) { | 184 | switch (ret) { |
185 | case IRQ_SET_MASK_OK: | 185 | case IRQ_SET_MASK_OK: |
186 | cpumask_copy(data->affinity, mask); | 186 | cpumask_copy(data->affinity, mask); |
187 | case IRQ_SET_MASK_OK_NOCOPY: | 187 | case IRQ_SET_MASK_OK_NOCOPY: |
188 | irq_set_thread_affinity(desc); | 188 | irq_set_thread_affinity(desc); |
189 | ret = 0; | 189 | ret = 0; |
190 | } | 190 | } |
191 | 191 | ||
192 | return ret; | 192 | return ret; |
193 | } | 193 | } |
194 | 194 | ||
195 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) | 195 | int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, |
196 | bool force) | ||
196 | { | 197 | { |
197 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 198 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
198 | struct irq_desc *desc = irq_data_to_desc(data); | 199 | struct irq_desc *desc = irq_data_to_desc(data); |
199 | int ret = 0; | 200 | int ret = 0; |
200 | 201 | ||
201 | if (!chip || !chip->irq_set_affinity) | 202 | if (!chip || !chip->irq_set_affinity) |
202 | return -EINVAL; | 203 | return -EINVAL; |
203 | 204 | ||
204 | if (irq_can_move_pcntxt(data)) { | 205 | if (irq_can_move_pcntxt(data)) { |
205 | ret = irq_do_set_affinity(data, mask, false); | 206 | ret = irq_do_set_affinity(data, mask, force); |
206 | } else { | 207 | } else { |
207 | irqd_set_move_pending(data); | 208 | irqd_set_move_pending(data); |
208 | irq_copy_pending(desc, mask); | 209 | irq_copy_pending(desc, mask); |
209 | } | 210 | } |
210 | 211 | ||
211 | if (desc->affinity_notify) { | 212 | if (desc->affinity_notify) { |
212 | kref_get(&desc->affinity_notify->kref); | 213 | kref_get(&desc->affinity_notify->kref); |
213 | schedule_work(&desc->affinity_notify->work); | 214 | schedule_work(&desc->affinity_notify->work); |
214 | } | 215 | } |
215 | irqd_set(data, IRQD_AFFINITY_SET); | 216 | irqd_set(data, IRQD_AFFINITY_SET); |
216 | 217 | ||
217 | return ret; | 218 | return ret; |
218 | } | 219 | } |
219 | 220 | ||
220 | /** | 221 | int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) |
221 | * irq_set_affinity - Set the irq affinity of a given irq | ||
222 | * @irq: Interrupt to set affinity | ||
223 | * @mask: cpumask | ||
224 | * | ||
225 | */ | ||
226 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) | ||
227 | { | 222 | { |
228 | struct irq_desc *desc = irq_to_desc(irq); | 223 | struct irq_desc *desc = irq_to_desc(irq); |
229 | unsigned long flags; | 224 | unsigned long flags; |
230 | int ret; | 225 | int ret; |
231 | 226 | ||
232 | if (!desc) | 227 | if (!desc) |
233 | return -EINVAL; | 228 | return -EINVAL; |
234 | 229 | ||
235 | raw_spin_lock_irqsave(&desc->lock, flags); | 230 | raw_spin_lock_irqsave(&desc->lock, flags); |
236 | ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask); | 231 | ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); |
237 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 232 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
238 | return ret; | 233 | return ret; |
239 | } | 234 | } |
240 | 235 | ||
241 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | 236 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) |
242 | { | 237 | { |
243 | unsigned long flags; | 238 | unsigned long flags; |
244 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); | 239 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
245 | 240 | ||
246 | if (!desc) | 241 | if (!desc) |
247 | return -EINVAL; | 242 | return -EINVAL; |
248 | desc->affinity_hint = m; | 243 | desc->affinity_hint = m; |
249 | irq_put_desc_unlock(desc, flags); | 244 | irq_put_desc_unlock(desc, flags); |
250 | return 0; | 245 | return 0; |
251 | } | 246 | } |
252 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | 247 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); |
253 | 248 | ||
254 | static void irq_affinity_notify(struct work_struct *work) | 249 | static void irq_affinity_notify(struct work_struct *work) |
255 | { | 250 | { |
256 | struct irq_affinity_notify *notify = | 251 | struct irq_affinity_notify *notify = |
257 | container_of(work, struct irq_affinity_notify, work); | 252 | container_of(work, struct irq_affinity_notify, work); |
258 | struct irq_desc *desc = irq_to_desc(notify->irq); | 253 | struct irq_desc *desc = irq_to_desc(notify->irq); |
259 | cpumask_var_t cpumask; | 254 | cpumask_var_t cpumask; |
260 | unsigned long flags; | 255 | unsigned long flags; |
261 | 256 | ||
262 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) | 257 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) |
263 | goto out; | 258 | goto out; |
264 | 259 | ||
265 | raw_spin_lock_irqsave(&desc->lock, flags); | 260 | raw_spin_lock_irqsave(&desc->lock, flags); |
266 | if (irq_move_pending(&desc->irq_data)) | 261 | if (irq_move_pending(&desc->irq_data)) |
267 | irq_get_pending(cpumask, desc); | 262 | irq_get_pending(cpumask, desc); |
268 | else | 263 | else |
269 | cpumask_copy(cpumask, desc->irq_data.affinity); | 264 | cpumask_copy(cpumask, desc->irq_data.affinity); |
270 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 265 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
271 | 266 | ||
272 | notify->notify(notify, cpumask); | 267 | notify->notify(notify, cpumask); |
273 | 268 | ||
274 | free_cpumask_var(cpumask); | 269 | free_cpumask_var(cpumask); |
275 | out: | 270 | out: |
276 | kref_put(¬ify->kref, notify->release); | 271 | kref_put(¬ify->kref, notify->release); |
277 | } | 272 | } |
278 | 273 | ||
279 | /** | 274 | /** |
280 | * irq_set_affinity_notifier - control notification of IRQ affinity changes | 275 | * irq_set_affinity_notifier - control notification of IRQ affinity changes |
281 | * @irq: Interrupt for which to enable/disable notification | 276 | * @irq: Interrupt for which to enable/disable notification |
282 | * @notify: Context for notification, or %NULL to disable | 277 | * @notify: Context for notification, or %NULL to disable |
283 | * notification. Function pointers must be initialised; | 278 | * notification. Function pointers must be initialised; |
284 | * the other fields will be initialised by this function. | 279 | * the other fields will be initialised by this function. |
285 | * | 280 | * |
286 | * Must be called in process context. Notification may only be enabled | 281 | * Must be called in process context. Notification may only be enabled |
287 | * after the IRQ is allocated and must be disabled before the IRQ is | 282 | * after the IRQ is allocated and must be disabled before the IRQ is |
288 | * freed using free_irq(). | 283 | * freed using free_irq(). |
289 | */ | 284 | */ |
290 | int | 285 | int |
291 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | 286 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) |
292 | { | 287 | { |
293 | struct irq_desc *desc = irq_to_desc(irq); | 288 | struct irq_desc *desc = irq_to_desc(irq); |
294 | struct irq_affinity_notify *old_notify; | 289 | struct irq_affinity_notify *old_notify; |
295 | unsigned long flags; | 290 | unsigned long flags; |
296 | 291 | ||
297 | /* The release function is promised process context */ | 292 | /* The release function is promised process context */ |
298 | might_sleep(); | 293 | might_sleep(); |
299 | 294 | ||
300 | if (!desc) | 295 | if (!desc) |
301 | return -EINVAL; | 296 | return -EINVAL; |
302 | 297 | ||
303 | /* Complete initialisation of *notify */ | 298 | /* Complete initialisation of *notify */ |
304 | if (notify) { | 299 | if (notify) { |
305 | notify->irq = irq; | 300 | notify->irq = irq; |
306 | kref_init(¬ify->kref); | 301 | kref_init(¬ify->kref); |
307 | INIT_WORK(¬ify->work, irq_affinity_notify); | 302 | INIT_WORK(¬ify->work, irq_affinity_notify); |
308 | } | 303 | } |
309 | 304 | ||
310 | raw_spin_lock_irqsave(&desc->lock, flags); | 305 | raw_spin_lock_irqsave(&desc->lock, flags); |
311 | old_notify = desc->affinity_notify; | 306 | old_notify = desc->affinity_notify; |
312 | desc->affinity_notify = notify; | 307 | desc->affinity_notify = notify; |
313 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 308 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
314 | 309 | ||
315 | if (old_notify) | 310 | if (old_notify) |
316 | kref_put(&old_notify->kref, old_notify->release); | 311 | kref_put(&old_notify->kref, old_notify->release); |
317 | 312 | ||
318 | return 0; | 313 | return 0; |
319 | } | 314 | } |
320 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); | 315 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); |
321 | 316 | ||
322 | #ifndef CONFIG_AUTO_IRQ_AFFINITY | 317 | #ifndef CONFIG_AUTO_IRQ_AFFINITY |
323 | /* | 318 | /* |
324 | * Generic version of the affinity autoselector. | 319 | * Generic version of the affinity autoselector. |
325 | */ | 320 | */ |
326 | static int | 321 | static int |
327 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | 322 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) |
328 | { | 323 | { |
329 | struct cpumask *set = irq_default_affinity; | 324 | struct cpumask *set = irq_default_affinity; |
330 | int node = desc->irq_data.node; | 325 | int node = desc->irq_data.node; |
331 | 326 | ||
332 | /* Excludes PER_CPU and NO_BALANCE interrupts */ | 327 | /* Excludes PER_CPU and NO_BALANCE interrupts */ |
333 | if (!irq_can_set_affinity(irq)) | 328 | if (!irq_can_set_affinity(irq)) |
334 | return 0; | 329 | return 0; |
335 | 330 | ||
336 | /* | 331 | /* |
337 | * Preserve an userspace affinity setup, but make sure that | 332 | * Preserve an userspace affinity setup, but make sure that |
338 | * one of the targets is online. | 333 | * one of the targets is online. |
339 | */ | 334 | */ |
340 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { | 335 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { |
341 | if (cpumask_intersects(desc->irq_data.affinity, | 336 | if (cpumask_intersects(desc->irq_data.affinity, |
342 | cpu_online_mask)) | 337 | cpu_online_mask)) |
343 | set = desc->irq_data.affinity; | 338 | set = desc->irq_data.affinity; |
344 | else | 339 | else |
345 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); | 340 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); |
346 | } | 341 | } |
347 | 342 | ||
348 | cpumask_and(mask, cpu_online_mask, set); | 343 | cpumask_and(mask, cpu_online_mask, set); |
349 | if (node != NUMA_NO_NODE) { | 344 | if (node != NUMA_NO_NODE) { |
350 | const struct cpumask *nodemask = cpumask_of_node(node); | 345 | const struct cpumask *nodemask = cpumask_of_node(node); |
351 | 346 | ||
352 | /* make sure at least one of the cpus in nodemask is online */ | 347 | /* make sure at least one of the cpus in nodemask is online */ |
353 | if (cpumask_intersects(mask, nodemask)) | 348 | if (cpumask_intersects(mask, nodemask)) |
354 | cpumask_and(mask, mask, nodemask); | 349 | cpumask_and(mask, mask, nodemask); |
355 | } | 350 | } |
356 | irq_do_set_affinity(&desc->irq_data, mask, false); | 351 | irq_do_set_affinity(&desc->irq_data, mask, false); |
357 | return 0; | 352 | return 0; |
358 | } | 353 | } |
359 | #else | 354 | #else |
360 | static inline int | 355 | static inline int |
361 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) | 356 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) |
362 | { | 357 | { |
363 | return irq_select_affinity(irq); | 358 | return irq_select_affinity(irq); |
364 | } | 359 | } |
365 | #endif | 360 | #endif |
366 | 361 | ||
367 | /* | 362 | /* |
368 | * Called when affinity is set via /proc/irq | 363 | * Called when affinity is set via /proc/irq |
369 | */ | 364 | */ |
370 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) | 365 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) |
371 | { | 366 | { |
372 | struct irq_desc *desc = irq_to_desc(irq); | 367 | struct irq_desc *desc = irq_to_desc(irq); |
373 | unsigned long flags; | 368 | unsigned long flags; |
374 | int ret; | 369 | int ret; |
375 | 370 | ||
376 | raw_spin_lock_irqsave(&desc->lock, flags); | 371 | raw_spin_lock_irqsave(&desc->lock, flags); |
377 | ret = setup_affinity(irq, desc, mask); | 372 | ret = setup_affinity(irq, desc, mask); |
378 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 373 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
379 | return ret; | 374 | return ret; |
380 | } | 375 | } |
381 | 376 | ||
382 | #else | 377 | #else |
383 | static inline int | 378 | static inline int |
384 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | 379 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) |
385 | { | 380 | { |
386 | return 0; | 381 | return 0; |
387 | } | 382 | } |
388 | #endif | 383 | #endif |
389 | 384 | ||
390 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | 385 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) |
391 | { | 386 | { |
392 | if (suspend) { | 387 | if (suspend) { |
393 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) | 388 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) |
394 | return; | 389 | return; |
395 | desc->istate |= IRQS_SUSPENDED; | 390 | desc->istate |= IRQS_SUSPENDED; |
396 | } | 391 | } |
397 | 392 | ||
398 | if (!desc->depth++) | 393 | if (!desc->depth++) |
399 | irq_disable(desc); | 394 | irq_disable(desc); |
400 | } | 395 | } |
401 | 396 | ||
402 | static int __disable_irq_nosync(unsigned int irq) | 397 | static int __disable_irq_nosync(unsigned int irq) |
403 | { | 398 | { |
404 | unsigned long flags; | 399 | unsigned long flags; |
405 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); | 400 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
406 | 401 | ||
407 | if (!desc) | 402 | if (!desc) |
408 | return -EINVAL; | 403 | return -EINVAL; |
409 | __disable_irq(desc, irq, false); | 404 | __disable_irq(desc, irq, false); |
410 | irq_put_desc_busunlock(desc, flags); | 405 | irq_put_desc_busunlock(desc, flags); |
411 | return 0; | 406 | return 0; |
412 | } | 407 | } |
413 | 408 | ||
414 | /** | 409 | /** |
415 | * disable_irq_nosync - disable an irq without waiting | 410 | * disable_irq_nosync - disable an irq without waiting |
416 | * @irq: Interrupt to disable | 411 | * @irq: Interrupt to disable |
417 | * | 412 | * |
418 | * Disable the selected interrupt line. Disables and Enables are | 413 | * Disable the selected interrupt line. Disables and Enables are |
419 | * nested. | 414 | * nested. |
420 | * Unlike disable_irq(), this function does not ensure existing | 415 | * Unlike disable_irq(), this function does not ensure existing |
421 | * instances of the IRQ handler have completed before returning. | 416 | * instances of the IRQ handler have completed before returning. |
422 | * | 417 | * |
423 | * This function may be called from IRQ context. | 418 | * This function may be called from IRQ context. |
424 | */ | 419 | */ |
425 | void disable_irq_nosync(unsigned int irq) | 420 | void disable_irq_nosync(unsigned int irq) |
426 | { | 421 | { |
427 | __disable_irq_nosync(irq); | 422 | __disable_irq_nosync(irq); |
428 | } | 423 | } |
429 | EXPORT_SYMBOL(disable_irq_nosync); | 424 | EXPORT_SYMBOL(disable_irq_nosync); |
430 | 425 | ||
431 | /** | 426 | /** |
432 | * disable_irq - disable an irq and wait for completion | 427 | * disable_irq - disable an irq and wait for completion |
433 | * @irq: Interrupt to disable | 428 | * @irq: Interrupt to disable |
434 | * | 429 | * |
435 | * Disable the selected interrupt line. Enables and Disables are | 430 | * Disable the selected interrupt line. Enables and Disables are |
436 | * nested. | 431 | * nested. |
437 | * This function waits for any pending IRQ handlers for this interrupt | 432 | * This function waits for any pending IRQ handlers for this interrupt |
438 | * to complete before returning. If you use this function while | 433 | * to complete before returning. If you use this function while |
439 | * holding a resource the IRQ handler may need you will deadlock. | 434 | * holding a resource the IRQ handler may need you will deadlock. |
440 | * | 435 | * |
441 | * This function may be called - with care - from IRQ context. | 436 | * This function may be called - with care - from IRQ context. |
442 | */ | 437 | */ |
443 | void disable_irq(unsigned int irq) | 438 | void disable_irq(unsigned int irq) |
444 | { | 439 | { |
445 | if (!__disable_irq_nosync(irq)) | 440 | if (!__disable_irq_nosync(irq)) |
446 | synchronize_irq(irq); | 441 | synchronize_irq(irq); |
447 | } | 442 | } |
448 | EXPORT_SYMBOL(disable_irq); | 443 | EXPORT_SYMBOL(disable_irq); |
449 | 444 | ||
450 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 445 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
451 | { | 446 | { |
452 | if (resume) { | 447 | if (resume) { |
453 | if (!(desc->istate & IRQS_SUSPENDED)) { | 448 | if (!(desc->istate & IRQS_SUSPENDED)) { |
454 | if (!desc->action) | 449 | if (!desc->action) |
455 | return; | 450 | return; |
456 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | 451 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) |
457 | return; | 452 | return; |
458 | /* Pretend that it got disabled ! */ | 453 | /* Pretend that it got disabled ! */ |
459 | desc->depth++; | 454 | desc->depth++; |
460 | } | 455 | } |
461 | desc->istate &= ~IRQS_SUSPENDED; | 456 | desc->istate &= ~IRQS_SUSPENDED; |
462 | } | 457 | } |
463 | 458 | ||
464 | switch (desc->depth) { | 459 | switch (desc->depth) { |
465 | case 0: | 460 | case 0: |
466 | err_out: | 461 | err_out: |
467 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); | 462 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); |
468 | break; | 463 | break; |
469 | case 1: { | 464 | case 1: { |
470 | if (desc->istate & IRQS_SUSPENDED) | 465 | if (desc->istate & IRQS_SUSPENDED) |
471 | goto err_out; | 466 | goto err_out; |
472 | /* Prevent probing on this irq: */ | 467 | /* Prevent probing on this irq: */ |
473 | irq_settings_set_noprobe(desc); | 468 | irq_settings_set_noprobe(desc); |
474 | irq_enable(desc); | 469 | irq_enable(desc); |
475 | check_irq_resend(desc, irq); | 470 | check_irq_resend(desc, irq); |
476 | /* fall-through */ | 471 | /* fall-through */ |
477 | } | 472 | } |
478 | default: | 473 | default: |
479 | desc->depth--; | 474 | desc->depth--; |
480 | } | 475 | } |
481 | } | 476 | } |
482 | 477 | ||
483 | /** | 478 | /** |
484 | * enable_irq - enable handling of an irq | 479 | * enable_irq - enable handling of an irq |
485 | * @irq: Interrupt to enable | 480 | * @irq: Interrupt to enable |
486 | * | 481 | * |
487 | * Undoes the effect of one call to disable_irq(). If this | 482 | * Undoes the effect of one call to disable_irq(). If this |
488 | * matches the last disable, processing of interrupts on this | 483 | * matches the last disable, processing of interrupts on this |
489 | * IRQ line is re-enabled. | 484 | * IRQ line is re-enabled. |
490 | * | 485 | * |
491 | * This function may be called from IRQ context only when | 486 | * This function may be called from IRQ context only when |
492 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! | 487 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! |
493 | */ | 488 | */ |
494 | void enable_irq(unsigned int irq) | 489 | void enable_irq(unsigned int irq) |
495 | { | 490 | { |
496 | unsigned long flags; | 491 | unsigned long flags; |
497 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); | 492 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
498 | 493 | ||
499 | if (!desc) | 494 | if (!desc) |
500 | return; | 495 | return; |
501 | if (WARN(!desc->irq_data.chip, | 496 | if (WARN(!desc->irq_data.chip, |
502 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | 497 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) |
503 | goto out; | 498 | goto out; |
504 | 499 | ||
505 | __enable_irq(desc, irq, false); | 500 | __enable_irq(desc, irq, false); |
506 | out: | 501 | out: |
507 | irq_put_desc_busunlock(desc, flags); | 502 | irq_put_desc_busunlock(desc, flags); |
508 | } | 503 | } |
509 | EXPORT_SYMBOL(enable_irq); | 504 | EXPORT_SYMBOL(enable_irq); |
510 | 505 | ||
511 | static int set_irq_wake_real(unsigned int irq, unsigned int on) | 506 | static int set_irq_wake_real(unsigned int irq, unsigned int on) |
512 | { | 507 | { |
513 | struct irq_desc *desc = irq_to_desc(irq); | 508 | struct irq_desc *desc = irq_to_desc(irq); |
514 | int ret = -ENXIO; | 509 | int ret = -ENXIO; |
515 | 510 | ||
516 | if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) | 511 | if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) |
517 | return 0; | 512 | return 0; |
518 | 513 | ||
519 | if (desc->irq_data.chip->irq_set_wake) | 514 | if (desc->irq_data.chip->irq_set_wake) |
520 | ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); | 515 | ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); |
521 | 516 | ||
522 | return ret; | 517 | return ret; |
523 | } | 518 | } |
524 | 519 | ||
525 | /** | 520 | /** |
526 | * irq_set_irq_wake - control irq power management wakeup | 521 | * irq_set_irq_wake - control irq power management wakeup |
527 | * @irq: interrupt to control | 522 | * @irq: interrupt to control |
528 | * @on: enable/disable power management wakeup | 523 | * @on: enable/disable power management wakeup |
529 | * | 524 | * |
530 | * Enable/disable power management wakeup mode, which is | 525 | * Enable/disable power management wakeup mode, which is |
531 | * disabled by default. Enables and disables must match, | 526 | * disabled by default. Enables and disables must match, |
532 | * just as they match for non-wakeup mode support. | 527 | * just as they match for non-wakeup mode support. |
533 | * | 528 | * |
534 | * Wakeup mode lets this IRQ wake the system from sleep | 529 | * Wakeup mode lets this IRQ wake the system from sleep |
535 | * states like "suspend to RAM". | 530 | * states like "suspend to RAM". |
536 | */ | 531 | */ |
537 | int irq_set_irq_wake(unsigned int irq, unsigned int on) | 532 | int irq_set_irq_wake(unsigned int irq, unsigned int on) |
538 | { | 533 | { |
539 | unsigned long flags; | 534 | unsigned long flags; |
540 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); | 535 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
541 | int ret = 0; | 536 | int ret = 0; |
542 | 537 | ||
543 | if (!desc) | 538 | if (!desc) |
544 | return -EINVAL; | 539 | return -EINVAL; |
545 | 540 | ||
546 | /* wakeup-capable irqs can be shared between drivers that | 541 | /* wakeup-capable irqs can be shared between drivers that |
547 | * don't need to have the same sleep mode behaviors. | 542 | * don't need to have the same sleep mode behaviors. |
548 | */ | 543 | */ |
549 | if (on) { | 544 | if (on) { |
550 | if (desc->wake_depth++ == 0) { | 545 | if (desc->wake_depth++ == 0) { |
551 | ret = set_irq_wake_real(irq, on); | 546 | ret = set_irq_wake_real(irq, on); |
552 | if (ret) | 547 | if (ret) |
553 | desc->wake_depth = 0; | 548 | desc->wake_depth = 0; |
554 | else | 549 | else |
555 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); | 550 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); |
556 | } | 551 | } |
557 | } else { | 552 | } else { |
558 | if (desc->wake_depth == 0) { | 553 | if (desc->wake_depth == 0) { |
559 | WARN(1, "Unbalanced IRQ %d wake disable\n", irq); | 554 | WARN(1, "Unbalanced IRQ %d wake disable\n", irq); |
560 | } else if (--desc->wake_depth == 0) { | 555 | } else if (--desc->wake_depth == 0) { |
561 | ret = set_irq_wake_real(irq, on); | 556 | ret = set_irq_wake_real(irq, on); |
562 | if (ret) | 557 | if (ret) |
563 | desc->wake_depth = 1; | 558 | desc->wake_depth = 1; |
564 | else | 559 | else |
565 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); | 560 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); |
566 | } | 561 | } |
567 | } | 562 | } |
568 | irq_put_desc_busunlock(desc, flags); | 563 | irq_put_desc_busunlock(desc, flags); |
569 | return ret; | 564 | return ret; |
570 | } | 565 | } |
571 | EXPORT_SYMBOL(irq_set_irq_wake); | 566 | EXPORT_SYMBOL(irq_set_irq_wake); |
572 | 567 | ||
573 | /* | 568 | /* |
574 | * Internal function that tells the architecture code whether a | 569 | * Internal function that tells the architecture code whether a |
575 | * particular irq has been exclusively allocated or is available | 570 | * particular irq has been exclusively allocated or is available |
576 | * for driver use. | 571 | * for driver use. |
577 | */ | 572 | */ |
578 | int can_request_irq(unsigned int irq, unsigned long irqflags) | 573 | int can_request_irq(unsigned int irq, unsigned long irqflags) |
579 | { | 574 | { |
580 | unsigned long flags; | 575 | unsigned long flags; |
581 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); | 576 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
582 | int canrequest = 0; | 577 | int canrequest = 0; |
583 | 578 | ||
584 | if (!desc) | 579 | if (!desc) |
585 | return 0; | 580 | return 0; |
586 | 581 | ||
587 | if (irq_settings_can_request(desc)) { | 582 | if (irq_settings_can_request(desc)) { |
588 | if (!desc->action || | 583 | if (!desc->action || |
589 | irqflags & desc->action->flags & IRQF_SHARED) | 584 | irqflags & desc->action->flags & IRQF_SHARED) |
590 | canrequest = 1; | 585 | canrequest = 1; |
591 | } | 586 | } |
592 | irq_put_desc_unlock(desc, flags); | 587 | irq_put_desc_unlock(desc, flags); |
593 | return canrequest; | 588 | return canrequest; |
594 | } | 589 | } |
595 | 590 | ||
596 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 591 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
597 | unsigned long flags) | 592 | unsigned long flags) |
598 | { | 593 | { |
599 | struct irq_chip *chip = desc->irq_data.chip; | 594 | struct irq_chip *chip = desc->irq_data.chip; |
600 | int ret, unmask = 0; | 595 | int ret, unmask = 0; |
601 | 596 | ||
602 | if (!chip || !chip->irq_set_type) { | 597 | if (!chip || !chip->irq_set_type) { |
603 | /* | 598 | /* |
604 | * IRQF_TRIGGER_* but the PIC does not support multiple | 599 | * IRQF_TRIGGER_* but the PIC does not support multiple |
605 | * flow-types? | 600 | * flow-types? |
606 | */ | 601 | */ |
607 | pr_debug("No set_type function for IRQ %d (%s)\n", irq, | 602 | pr_debug("No set_type function for IRQ %d (%s)\n", irq, |
608 | chip ? (chip->name ? : "unknown") : "unknown"); | 603 | chip ? (chip->name ? : "unknown") : "unknown"); |
609 | return 0; | 604 | return 0; |
610 | } | 605 | } |
611 | 606 | ||
612 | flags &= IRQ_TYPE_SENSE_MASK; | 607 | flags &= IRQ_TYPE_SENSE_MASK; |
613 | 608 | ||
614 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | 609 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { |
615 | if (!irqd_irq_masked(&desc->irq_data)) | 610 | if (!irqd_irq_masked(&desc->irq_data)) |
616 | mask_irq(desc); | 611 | mask_irq(desc); |
617 | if (!irqd_irq_disabled(&desc->irq_data)) | 612 | if (!irqd_irq_disabled(&desc->irq_data)) |
618 | unmask = 1; | 613 | unmask = 1; |
619 | } | 614 | } |
620 | 615 | ||
621 | /* caller masked out all except trigger mode flags */ | 616 | /* caller masked out all except trigger mode flags */ |
622 | ret = chip->irq_set_type(&desc->irq_data, flags); | 617 | ret = chip->irq_set_type(&desc->irq_data, flags); |
623 | 618 | ||
624 | switch (ret) { | 619 | switch (ret) { |
625 | case IRQ_SET_MASK_OK: | 620 | case IRQ_SET_MASK_OK: |
626 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); | 621 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); |
627 | irqd_set(&desc->irq_data, flags); | 622 | irqd_set(&desc->irq_data, flags); |
628 | 623 | ||
629 | case IRQ_SET_MASK_OK_NOCOPY: | 624 | case IRQ_SET_MASK_OK_NOCOPY: |
630 | flags = irqd_get_trigger_type(&desc->irq_data); | 625 | flags = irqd_get_trigger_type(&desc->irq_data); |
631 | irq_settings_set_trigger_mask(desc, flags); | 626 | irq_settings_set_trigger_mask(desc, flags); |
632 | irqd_clear(&desc->irq_data, IRQD_LEVEL); | 627 | irqd_clear(&desc->irq_data, IRQD_LEVEL); |
633 | irq_settings_clr_level(desc); | 628 | irq_settings_clr_level(desc); |
634 | if (flags & IRQ_TYPE_LEVEL_MASK) { | 629 | if (flags & IRQ_TYPE_LEVEL_MASK) { |
635 | irq_settings_set_level(desc); | 630 | irq_settings_set_level(desc); |
636 | irqd_set(&desc->irq_data, IRQD_LEVEL); | 631 | irqd_set(&desc->irq_data, IRQD_LEVEL); |
637 | } | 632 | } |
638 | 633 | ||
639 | ret = 0; | 634 | ret = 0; |
640 | break; | 635 | break; |
641 | default: | 636 | default: |
642 | pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", | 637 | pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", |
643 | flags, irq, chip->irq_set_type); | 638 | flags, irq, chip->irq_set_type); |
644 | } | 639 | } |
645 | if (unmask) | 640 | if (unmask) |
646 | unmask_irq(desc); | 641 | unmask_irq(desc); |
647 | return ret; | 642 | return ret; |
648 | } | 643 | } |
649 | 644 | ||
#ifdef CONFIG_HARDIRQS_SW_RESEND
/*
 * Record the parent interrupt of a cascaded irq so the software
 * resend mechanism can retrigger through it.
 */
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif
665 | 660 | ||
666 | /* | 661 | /* |
667 | * Default primary interrupt handler for threaded interrupts. Is | 662 | * Default primary interrupt handler for threaded interrupts. Is |
668 | * assigned as primary handler when request_threaded_irq is called | 663 | * assigned as primary handler when request_threaded_irq is called |
669 | * with handler == NULL. Useful for oneshot interrupts. | 664 | * with handler == NULL. Useful for oneshot interrupts. |
670 | */ | 665 | */ |
671 | static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) | 666 | static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) |
672 | { | 667 | { |
673 | return IRQ_WAKE_THREAD; | 668 | return IRQ_WAKE_THREAD; |
674 | } | 669 | } |
675 | 670 | ||
676 | /* | 671 | /* |
677 | * Primary handler for nested threaded interrupts. Should never be | 672 | * Primary handler for nested threaded interrupts. Should never be |
678 | * called. | 673 | * called. |
679 | */ | 674 | */ |
680 | static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) | 675 | static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) |
681 | { | 676 | { |
682 | WARN(1, "Primary handler called for nested irq %d\n", irq); | 677 | WARN(1, "Primary handler called for nested irq %d\n", irq); |
683 | return IRQ_NONE; | 678 | return IRQ_NONE; |
684 | } | 679 | } |
685 | 680 | ||
686 | static int irq_wait_for_interrupt(struct irqaction *action) | 681 | static int irq_wait_for_interrupt(struct irqaction *action) |
687 | { | 682 | { |
688 | set_current_state(TASK_INTERRUPTIBLE); | 683 | set_current_state(TASK_INTERRUPTIBLE); |
689 | 684 | ||
690 | while (!kthread_should_stop()) { | 685 | while (!kthread_should_stop()) { |
691 | 686 | ||
692 | if (test_and_clear_bit(IRQTF_RUNTHREAD, | 687 | if (test_and_clear_bit(IRQTF_RUNTHREAD, |
693 | &action->thread_flags)) { | 688 | &action->thread_flags)) { |
694 | __set_current_state(TASK_RUNNING); | 689 | __set_current_state(TASK_RUNNING); |
695 | return 0; | 690 | return 0; |
696 | } | 691 | } |
697 | schedule(); | 692 | schedule(); |
698 | set_current_state(TASK_INTERRUPTIBLE); | 693 | set_current_state(TASK_INTERRUPTIBLE); |
699 | } | 694 | } |
700 | __set_current_state(TASK_RUNNING); | 695 | __set_current_state(TASK_RUNNING); |
701 | return -1; | 696 | return -1; |
702 | } | 697 | } |
703 | 698 | ||
704 | /* | 699 | /* |
705 | * Oneshot interrupts keep the irq line masked until the threaded | 700 | * Oneshot interrupts keep the irq line masked until the threaded |
706 | * handler finished. unmask if the interrupt has not been disabled and | 701 | * handler finished. unmask if the interrupt has not been disabled and |
707 | * is marked MASKED. | 702 | * is marked MASKED. |
708 | */ | 703 | */ |
709 | static void irq_finalize_oneshot(struct irq_desc *desc, | 704 | static void irq_finalize_oneshot(struct irq_desc *desc, |
710 | struct irqaction *action) | 705 | struct irqaction *action) |
711 | { | 706 | { |
712 | if (!(desc->istate & IRQS_ONESHOT)) | 707 | if (!(desc->istate & IRQS_ONESHOT)) |
713 | return; | 708 | return; |
714 | again: | 709 | again: |
715 | chip_bus_lock(desc); | 710 | chip_bus_lock(desc); |
716 | raw_spin_lock_irq(&desc->lock); | 711 | raw_spin_lock_irq(&desc->lock); |
717 | 712 | ||
718 | /* | 713 | /* |
719 | * Implausible though it may be we need to protect us against | 714 | * Implausible though it may be we need to protect us against |
720 | * the following scenario: | 715 | * the following scenario: |
721 | * | 716 | * |
722 | * The thread is faster done than the hard interrupt handler | 717 | * The thread is faster done than the hard interrupt handler |
723 | * on the other CPU. If we unmask the irq line then the | 718 | * on the other CPU. If we unmask the irq line then the |
724 | * interrupt can come in again and masks the line, leaves due | 719 | * interrupt can come in again and masks the line, leaves due |
725 | * to IRQS_INPROGRESS and the irq line is masked forever. | 720 | * to IRQS_INPROGRESS and the irq line is masked forever. |
726 | * | 721 | * |
727 | * This also serializes the state of shared oneshot handlers | 722 | * This also serializes the state of shared oneshot handlers |
728 | * versus "desc->threads_onehsot |= action->thread_mask;" in | 723 | * versus "desc->threads_onehsot |= action->thread_mask;" in |
729 | * irq_wake_thread(). See the comment there which explains the | 724 | * irq_wake_thread(). See the comment there which explains the |
730 | * serialization. | 725 | * serialization. |
731 | */ | 726 | */ |
732 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { | 727 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { |
733 | raw_spin_unlock_irq(&desc->lock); | 728 | raw_spin_unlock_irq(&desc->lock); |
734 | chip_bus_sync_unlock(desc); | 729 | chip_bus_sync_unlock(desc); |
735 | cpu_relax(); | 730 | cpu_relax(); |
736 | goto again; | 731 | goto again; |
737 | } | 732 | } |
738 | 733 | ||
739 | /* | 734 | /* |
740 | * Now check again, whether the thread should run. Otherwise | 735 | * Now check again, whether the thread should run. Otherwise |
741 | * we would clear the threads_oneshot bit of this thread which | 736 | * we would clear the threads_oneshot bit of this thread which |
742 | * was just set. | 737 | * was just set. |
743 | */ | 738 | */ |
744 | if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | 739 | if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) |
745 | goto out_unlock; | 740 | goto out_unlock; |
746 | 741 | ||
747 | desc->threads_oneshot &= ~action->thread_mask; | 742 | desc->threads_oneshot &= ~action->thread_mask; |
748 | 743 | ||
749 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && | 744 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && |
750 | irqd_irq_masked(&desc->irq_data)) | 745 | irqd_irq_masked(&desc->irq_data)) |
751 | unmask_threaded_irq(desc); | 746 | unmask_threaded_irq(desc); |
752 | 747 | ||
753 | out_unlock: | 748 | out_unlock: |
754 | raw_spin_unlock_irq(&desc->lock); | 749 | raw_spin_unlock_irq(&desc->lock); |
755 | chip_bus_sync_unlock(desc); | 750 | chip_bus_sync_unlock(desc); |
756 | } | 751 | } |
757 | 752 | ||
#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_data.affinity)
		cpumask_copy(mask, desc->irq_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
/* UP: there is only one CPU, nothing to adjust */
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
799 | 794 | ||
800 | /* | 795 | /* |
801 | * Interrupts which are not explicitely requested as threaded | 796 | * Interrupts which are not explicitely requested as threaded |
802 | * interrupts rely on the implicit bh/preempt disable of the hard irq | 797 | * interrupts rely on the implicit bh/preempt disable of the hard irq |
803 | * context. So we need to disable bh here to avoid deadlocks and other | 798 | * context. So we need to disable bh here to avoid deadlocks and other |
804 | * side effects. | 799 | * side effects. |
805 | */ | 800 | */ |
806 | static irqreturn_t | 801 | static irqreturn_t |
807 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) | 802 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) |
808 | { | 803 | { |
809 | irqreturn_t ret; | 804 | irqreturn_t ret; |
810 | 805 | ||
811 | local_bh_disable(); | 806 | local_bh_disable(); |
812 | ret = action->thread_fn(action->irq, action->dev_id); | 807 | ret = action->thread_fn(action->irq, action->dev_id); |
813 | irq_finalize_oneshot(desc, action); | 808 | irq_finalize_oneshot(desc, action); |
814 | local_bh_enable(); | 809 | local_bh_enable(); |
815 | return ret; | 810 | return ret; |
816 | } | 811 | } |
817 | 812 | ||
818 | /* | 813 | /* |
819 | * Interrupts explicitly requested as threaded interrupts want to be | 814 | * Interrupts explicitly requested as threaded interrupts want to be |
820 | * preemtible - many of them need to sleep and wait for slow busses to | 815 | * preemtible - many of them need to sleep and wait for slow busses to |
821 | * complete. | 816 | * complete. |
822 | */ | 817 | */ |
823 | static irqreturn_t irq_thread_fn(struct irq_desc *desc, | 818 | static irqreturn_t irq_thread_fn(struct irq_desc *desc, |
824 | struct irqaction *action) | 819 | struct irqaction *action) |
825 | { | 820 | { |
826 | irqreturn_t ret; | 821 | irqreturn_t ret; |
827 | 822 | ||
828 | ret = action->thread_fn(action->irq, action->dev_id); | 823 | ret = action->thread_fn(action->irq, action->dev_id); |
829 | irq_finalize_oneshot(desc, action); | 824 | irq_finalize_oneshot(desc, action); |
830 | return ret; | 825 | return ret; |
831 | } | 826 | } |
832 | 827 | ||
833 | static void wake_threads_waitq(struct irq_desc *desc) | 828 | static void wake_threads_waitq(struct irq_desc *desc) |
834 | { | 829 | { |
835 | if (atomic_dec_and_test(&desc->threads_active)) | 830 | if (atomic_dec_and_test(&desc->threads_active)) |
836 | wake_up(&desc->wait_for_threads); | 831 | wake_up(&desc->wait_for_threads); |
837 | } | 832 | } |
838 | 833 | ||
839 | static void irq_thread_dtor(struct callback_head *unused) | 834 | static void irq_thread_dtor(struct callback_head *unused) |
840 | { | 835 | { |
841 | struct task_struct *tsk = current; | 836 | struct task_struct *tsk = current; |
842 | struct irq_desc *desc; | 837 | struct irq_desc *desc; |
843 | struct irqaction *action; | 838 | struct irqaction *action; |
844 | 839 | ||
845 | if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) | 840 | if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) |
846 | return; | 841 | return; |
847 | 842 | ||
848 | action = kthread_data(tsk); | 843 | action = kthread_data(tsk); |
849 | 844 | ||
850 | pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", | 845 | pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", |
851 | tsk->comm, tsk->pid, action->irq); | 846 | tsk->comm, tsk->pid, action->irq); |
852 | 847 | ||
853 | 848 | ||
854 | desc = irq_to_desc(action->irq); | 849 | desc = irq_to_desc(action->irq); |
855 | /* | 850 | /* |
856 | * If IRQTF_RUNTHREAD is set, we need to decrement | 851 | * If IRQTF_RUNTHREAD is set, we need to decrement |
857 | * desc->threads_active and wake possible waiters. | 852 | * desc->threads_active and wake possible waiters. |
858 | */ | 853 | */ |
859 | if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | 854 | if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) |
860 | wake_threads_waitq(desc); | 855 | wake_threads_waitq(desc); |
861 | 856 | ||
862 | /* Prevent a stale desc->threads_oneshot */ | 857 | /* Prevent a stale desc->threads_oneshot */ |
863 | irq_finalize_oneshot(desc, action); | 858 | irq_finalize_oneshot(desc, action); |
864 | } | 859 | } |
865 | 860 | ||
866 | /* | 861 | /* |
867 | * Interrupt handler thread | 862 | * Interrupt handler thread |
868 | */ | 863 | */ |
869 | static int irq_thread(void *data) | 864 | static int irq_thread(void *data) |
870 | { | 865 | { |
871 | struct callback_head on_exit_work; | 866 | struct callback_head on_exit_work; |
872 | struct irqaction *action = data; | 867 | struct irqaction *action = data; |
873 | struct irq_desc *desc = irq_to_desc(action->irq); | 868 | struct irq_desc *desc = irq_to_desc(action->irq); |
874 | irqreturn_t (*handler_fn)(struct irq_desc *desc, | 869 | irqreturn_t (*handler_fn)(struct irq_desc *desc, |
875 | struct irqaction *action); | 870 | struct irqaction *action); |
876 | 871 | ||
877 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, | 872 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, |
878 | &action->thread_flags)) | 873 | &action->thread_flags)) |
879 | handler_fn = irq_forced_thread_fn; | 874 | handler_fn = irq_forced_thread_fn; |
880 | else | 875 | else |
881 | handler_fn = irq_thread_fn; | 876 | handler_fn = irq_thread_fn; |
882 | 877 | ||
883 | init_task_work(&on_exit_work, irq_thread_dtor); | 878 | init_task_work(&on_exit_work, irq_thread_dtor); |
884 | task_work_add(current, &on_exit_work, false); | 879 | task_work_add(current, &on_exit_work, false); |
885 | 880 | ||
886 | irq_thread_check_affinity(desc, action); | 881 | irq_thread_check_affinity(desc, action); |
887 | 882 | ||
888 | while (!irq_wait_for_interrupt(action)) { | 883 | while (!irq_wait_for_interrupt(action)) { |
889 | irqreturn_t action_ret; | 884 | irqreturn_t action_ret; |
890 | 885 | ||
891 | irq_thread_check_affinity(desc, action); | 886 | irq_thread_check_affinity(desc, action); |
892 | 887 | ||
893 | action_ret = handler_fn(desc, action); | 888 | action_ret = handler_fn(desc, action); |
894 | if (!noirqdebug) | 889 | if (!noirqdebug) |
895 | note_interrupt(action->irq, desc, action_ret); | 890 | note_interrupt(action->irq, desc, action_ret); |
896 | 891 | ||
897 | wake_threads_waitq(desc); | 892 | wake_threads_waitq(desc); |
898 | } | 893 | } |
899 | 894 | ||
900 | /* | 895 | /* |
901 | * This is the regular exit path. __free_irq() is stopping the | 896 | * This is the regular exit path. __free_irq() is stopping the |
902 | * thread via kthread_stop() after calling | 897 | * thread via kthread_stop() after calling |
903 | * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the | 898 | * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the |
904 | * oneshot mask bit can be set. We cannot verify that as we | 899 | * oneshot mask bit can be set. We cannot verify that as we |
905 | * cannot touch the oneshot mask at this point anymore as | 900 | * cannot touch the oneshot mask at this point anymore as |
906 | * __setup_irq() might have given out currents thread_mask | 901 | * __setup_irq() might have given out currents thread_mask |
907 | * again. | 902 | * again. |
908 | */ | 903 | */ |
909 | task_work_cancel(current, irq_thread_dtor); | 904 | task_work_cancel(current, irq_thread_dtor); |
910 | return 0; | 905 | return 0; |
911 | } | 906 | } |
912 | 907 | ||
913 | /** | 908 | /** |
914 | * irq_wake_thread - wake the irq thread for the action identified by dev_id | 909 | * irq_wake_thread - wake the irq thread for the action identified by dev_id |
915 | * @irq: Interrupt line | 910 | * @irq: Interrupt line |
916 | * @dev_id: Device identity for which the thread should be woken | 911 | * @dev_id: Device identity for which the thread should be woken |
917 | * | 912 | * |
918 | */ | 913 | */ |
919 | void irq_wake_thread(unsigned int irq, void *dev_id) | 914 | void irq_wake_thread(unsigned int irq, void *dev_id) |
920 | { | 915 | { |
921 | struct irq_desc *desc = irq_to_desc(irq); | 916 | struct irq_desc *desc = irq_to_desc(irq); |
922 | struct irqaction *action; | 917 | struct irqaction *action; |
923 | unsigned long flags; | 918 | unsigned long flags; |
924 | 919 | ||
925 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 920 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
926 | return; | 921 | return; |
927 | 922 | ||
928 | raw_spin_lock_irqsave(&desc->lock, flags); | 923 | raw_spin_lock_irqsave(&desc->lock, flags); |
929 | for (action = desc->action; action; action = action->next) { | 924 | for (action = desc->action; action; action = action->next) { |
930 | if (action->dev_id == dev_id) { | 925 | if (action->dev_id == dev_id) { |
931 | if (action->thread) | 926 | if (action->thread) |
932 | __irq_wake_thread(desc, action); | 927 | __irq_wake_thread(desc, action); |
933 | break; | 928 | break; |
934 | } | 929 | } |
935 | } | 930 | } |
936 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 931 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
937 | } | 932 | } |
938 | EXPORT_SYMBOL_GPL(irq_wake_thread); | 933 | EXPORT_SYMBOL_GPL(irq_wake_thread); |
939 | 934 | ||
940 | static void irq_setup_forced_threading(struct irqaction *new) | 935 | static void irq_setup_forced_threading(struct irqaction *new) |
941 | { | 936 | { |
942 | if (!force_irqthreads) | 937 | if (!force_irqthreads) |
943 | return; | 938 | return; |
944 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) | 939 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) |
945 | return; | 940 | return; |
946 | 941 | ||
947 | new->flags |= IRQF_ONESHOT; | 942 | new->flags |= IRQF_ONESHOT; |
948 | 943 | ||
949 | if (!new->thread_fn) { | 944 | if (!new->thread_fn) { |
950 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | 945 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); |
951 | new->thread_fn = new->handler; | 946 | new->thread_fn = new->handler; |
952 | new->handler = irq_default_primary_handler; | 947 | new->handler = irq_default_primary_handler; |
953 | } | 948 | } |
954 | } | 949 | } |
955 | 950 | ||
956 | static int irq_request_resources(struct irq_desc *desc) | 951 | static int irq_request_resources(struct irq_desc *desc) |
957 | { | 952 | { |
958 | struct irq_data *d = &desc->irq_data; | 953 | struct irq_data *d = &desc->irq_data; |
959 | struct irq_chip *c = d->chip; | 954 | struct irq_chip *c = d->chip; |
960 | 955 | ||
961 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; | 956 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; |
962 | } | 957 | } |
963 | 958 | ||
964 | static void irq_release_resources(struct irq_desc *desc) | 959 | static void irq_release_resources(struct irq_desc *desc) |
965 | { | 960 | { |
966 | struct irq_data *d = &desc->irq_data; | 961 | struct irq_data *d = &desc->irq_data; |
967 | struct irq_chip *c = d->chip; | 962 | struct irq_chip *c = d->chip; |
968 | 963 | ||
969 | if (c->irq_release_resources) | 964 | if (c->irq_release_resources) |
970 | c->irq_release_resources(d); | 965 | c->irq_release_resources(d); |
971 | } | 966 | } |
972 | 967 | ||
973 | /* | 968 | /* |
974 | * Internal function to register an irqaction - typically used to | 969 | * Internal function to register an irqaction - typically used to |
975 | * allocate special interrupts that are part of the architecture. | 970 | * allocate special interrupts that are part of the architecture. |
976 | */ | 971 | */ |
977 | static int | 972 | static int |
978 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | 973 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) |
979 | { | 974 | { |
980 | struct irqaction *old, **old_ptr; | 975 | struct irqaction *old, **old_ptr; |
981 | unsigned long flags, thread_mask = 0; | 976 | unsigned long flags, thread_mask = 0; |
982 | int ret, nested, shared = 0; | 977 | int ret, nested, shared = 0; |
983 | cpumask_var_t mask; | 978 | cpumask_var_t mask; |
984 | 979 | ||
985 | if (!desc) | 980 | if (!desc) |
986 | return -EINVAL; | 981 | return -EINVAL; |
987 | 982 | ||
988 | if (desc->irq_data.chip == &no_irq_chip) | 983 | if (desc->irq_data.chip == &no_irq_chip) |
989 | return -ENOSYS; | 984 | return -ENOSYS; |
990 | if (!try_module_get(desc->owner)) | 985 | if (!try_module_get(desc->owner)) |
991 | return -ENODEV; | 986 | return -ENODEV; |
992 | 987 | ||
993 | /* | 988 | /* |
994 | * Check whether the interrupt nests into another interrupt | 989 | * Check whether the interrupt nests into another interrupt |
995 | * thread. | 990 | * thread. |
996 | */ | 991 | */ |
997 | nested = irq_settings_is_nested_thread(desc); | 992 | nested = irq_settings_is_nested_thread(desc); |
998 | if (nested) { | 993 | if (nested) { |
999 | if (!new->thread_fn) { | 994 | if (!new->thread_fn) { |
1000 | ret = -EINVAL; | 995 | ret = -EINVAL; |
1001 | goto out_mput; | 996 | goto out_mput; |
1002 | } | 997 | } |
1003 | /* | 998 | /* |
1004 | * Replace the primary handler which was provided from | 999 | * Replace the primary handler which was provided from |
1005 | * the driver for non nested interrupt handling by the | 1000 | * the driver for non nested interrupt handling by the |
1006 | * dummy function which warns when called. | 1001 | * dummy function which warns when called. |
1007 | */ | 1002 | */ |
1008 | new->handler = irq_nested_primary_handler; | 1003 | new->handler = irq_nested_primary_handler; |
1009 | } else { | 1004 | } else { |
1010 | if (irq_settings_can_thread(desc)) | 1005 | if (irq_settings_can_thread(desc)) |
1011 | irq_setup_forced_threading(new); | 1006 | irq_setup_forced_threading(new); |
1012 | } | 1007 | } |
1013 | 1008 | ||
1014 | /* | 1009 | /* |
1015 | * Create a handler thread when a thread function is supplied | 1010 | * Create a handler thread when a thread function is supplied |
1016 | * and the interrupt does not nest into another interrupt | 1011 | * and the interrupt does not nest into another interrupt |
1017 | * thread. | 1012 | * thread. |
1018 | */ | 1013 | */ |
1019 | if (new->thread_fn && !nested) { | 1014 | if (new->thread_fn && !nested) { |
1020 | struct task_struct *t; | 1015 | struct task_struct *t; |
1021 | static const struct sched_param param = { | 1016 | static const struct sched_param param = { |
1022 | .sched_priority = MAX_USER_RT_PRIO/2, | 1017 | .sched_priority = MAX_USER_RT_PRIO/2, |
1023 | }; | 1018 | }; |
1024 | 1019 | ||
1025 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | 1020 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, |
1026 | new->name); | 1021 | new->name); |
1027 | if (IS_ERR(t)) { | 1022 | if (IS_ERR(t)) { |
1028 | ret = PTR_ERR(t); | 1023 | ret = PTR_ERR(t); |
1029 | goto out_mput; | 1024 | goto out_mput; |
1030 | } | 1025 | } |
1031 | 1026 | ||
1032 | sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m); | 1027 | sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m); |
1033 | 1028 | ||
1034 | /* | 1029 | /* |
1035 | * We keep the reference to the task struct even if | 1030 | * We keep the reference to the task struct even if |
1036 | * the thread dies to avoid that the interrupt code | 1031 | * the thread dies to avoid that the interrupt code |
1037 | * references an already freed task_struct. | 1032 | * references an already freed task_struct. |
1038 | */ | 1033 | */ |
1039 | get_task_struct(t); | 1034 | get_task_struct(t); |
1040 | new->thread = t; | 1035 | new->thread = t; |
1041 | /* | 1036 | /* |
1042 | * Tell the thread to set its affinity. This is | 1037 | * Tell the thread to set its affinity. This is |
1043 | * important for shared interrupt handlers as we do | 1038 | * important for shared interrupt handlers as we do |
1044 | * not invoke setup_affinity() for the secondary | 1039 | * not invoke setup_affinity() for the secondary |
1045 | * handlers as everything is already set up. Even for | 1040 | * handlers as everything is already set up. Even for |
1046 | * interrupts marked with IRQF_NO_BALANCE this is | 1041 | * interrupts marked with IRQF_NO_BALANCE this is |
1047 | * correct as we want the thread to move to the cpu(s) | 1042 | * correct as we want the thread to move to the cpu(s) |
1048 | * on which the requesting code placed the interrupt. | 1043 | * on which the requesting code placed the interrupt. |
1049 | */ | 1044 | */ |
1050 | set_bit(IRQTF_AFFINITY, &new->thread_flags); | 1045 | set_bit(IRQTF_AFFINITY, &new->thread_flags); |
1051 | } | 1046 | } |
1052 | 1047 | ||
1053 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | 1048 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { |
1054 | ret = -ENOMEM; | 1049 | ret = -ENOMEM; |
1055 | goto out_thread; | 1050 | goto out_thread; |
1056 | } | 1051 | } |
1057 | 1052 | ||
1058 | /* | 1053 | /* |
1059 | * Drivers are often written to work w/o knowledge about the | 1054 | * Drivers are often written to work w/o knowledge about the |
1060 | * underlying irq chip implementation, so a request for a | 1055 | * underlying irq chip implementation, so a request for a |
1061 | * threaded irq without a primary hard irq context handler | 1056 | * threaded irq without a primary hard irq context handler |
1062 | * requires the ONESHOT flag to be set. Some irq chips like | 1057 | * requires the ONESHOT flag to be set. Some irq chips like |
1063 | * MSI based interrupts are per se one shot safe. Check the | 1058 | * MSI based interrupts are per se one shot safe. Check the |
1064 | * chip flags, so we can avoid the unmask dance at the end of | 1059 | * chip flags, so we can avoid the unmask dance at the end of |
1065 | * the threaded handler for those. | 1060 | * the threaded handler for those. |
1066 | */ | 1061 | */ |
1067 | if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) | 1062 | if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) |
1068 | new->flags &= ~IRQF_ONESHOT; | 1063 | new->flags &= ~IRQF_ONESHOT; |
1069 | 1064 | ||
1070 | /* | 1065 | /* |
1071 | * The following block of code has to be executed atomically | 1066 | * The following block of code has to be executed atomically |
1072 | */ | 1067 | */ |
1073 | raw_spin_lock_irqsave(&desc->lock, flags); | 1068 | raw_spin_lock_irqsave(&desc->lock, flags); |
1074 | old_ptr = &desc->action; | 1069 | old_ptr = &desc->action; |
1075 | old = *old_ptr; | 1070 | old = *old_ptr; |
1076 | if (old) { | 1071 | if (old) { |
1077 | /* | 1072 | /* |
1078 | * Can't share interrupts unless both agree to and are | 1073 | * Can't share interrupts unless both agree to and are |
1079 | * the same type (level, edge, polarity). So both flag | 1074 | * the same type (level, edge, polarity). So both flag |
1080 | * fields must have IRQF_SHARED set and the bits which | 1075 | * fields must have IRQF_SHARED set and the bits which |
1081 | * set the trigger type must match. Also all must | 1076 | * set the trigger type must match. Also all must |
1082 | * agree on ONESHOT. | 1077 | * agree on ONESHOT. |
1083 | */ | 1078 | */ |
1084 | if (!((old->flags & new->flags) & IRQF_SHARED) || | 1079 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
1085 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || | 1080 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || |
1086 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) | 1081 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) |
1087 | goto mismatch; | 1082 | goto mismatch; |
1088 | 1083 | ||
1089 | /* All handlers must agree on per-cpuness */ | 1084 | /* All handlers must agree on per-cpuness */ |
1090 | if ((old->flags & IRQF_PERCPU) != | 1085 | if ((old->flags & IRQF_PERCPU) != |
1091 | (new->flags & IRQF_PERCPU)) | 1086 | (new->flags & IRQF_PERCPU)) |
1092 | goto mismatch; | 1087 | goto mismatch; |
1093 | 1088 | ||
1094 | /* add new interrupt at end of irq queue */ | 1089 | /* add new interrupt at end of irq queue */ |
1095 | do { | 1090 | do { |
1096 | /* | 1091 | /* |
1097 | * Or all existing action->thread_mask bits, | 1092 | * Or all existing action->thread_mask bits, |
1098 | * so we can find the next zero bit for this | 1093 | * so we can find the next zero bit for this |
1099 | * new action. | 1094 | * new action. |
1100 | */ | 1095 | */ |
1101 | thread_mask |= old->thread_mask; | 1096 | thread_mask |= old->thread_mask; |
1102 | old_ptr = &old->next; | 1097 | old_ptr = &old->next; |
1103 | old = *old_ptr; | 1098 | old = *old_ptr; |
1104 | } while (old); | 1099 | } while (old); |
1105 | shared = 1; | 1100 | shared = 1; |
1106 | } | 1101 | } |
1107 | 1102 | ||
1108 | /* | 1103 | /* |
1109 | * Setup the thread mask for this irqaction for ONESHOT. For | 1104 | * Setup the thread mask for this irqaction for ONESHOT. For |
1110 | * !ONESHOT irqs the thread mask is 0 so we can avoid a | 1105 | * !ONESHOT irqs the thread mask is 0 so we can avoid a |
1111 | * conditional in irq_wake_thread(). | 1106 | * conditional in irq_wake_thread(). |
1112 | */ | 1107 | */ |
1113 | if (new->flags & IRQF_ONESHOT) { | 1108 | if (new->flags & IRQF_ONESHOT) { |
1114 | /* | 1109 | /* |
1115 | * Unlikely to have 32 resp 64 irqs sharing one line, | 1110 | * Unlikely to have 32 resp 64 irqs sharing one line, |
1116 | * but who knows. | 1111 | * but who knows. |
1117 | */ | 1112 | */ |
1118 | if (thread_mask == ~0UL) { | 1113 | if (thread_mask == ~0UL) { |
1119 | ret = -EBUSY; | 1114 | ret = -EBUSY; |
1120 | goto out_mask; | 1115 | goto out_mask; |
1121 | } | 1116 | } |
1122 | /* | 1117 | /* |
1123 | * The thread_mask for the action is or'ed to | 1118 | * The thread_mask for the action is or'ed to |
1124 | * desc->thread_active to indicate that the | 1119 | * desc->thread_active to indicate that the |
1125 | * IRQF_ONESHOT thread handler has been woken, but not | 1120 | * IRQF_ONESHOT thread handler has been woken, but not |
1126 | * yet finished. The bit is cleared when a thread | 1121 | * yet finished. The bit is cleared when a thread |
1127 | * completes. When all threads of a shared interrupt | 1122 | * completes. When all threads of a shared interrupt |
1128 | * line have completed desc->threads_active becomes | 1123 | * line have completed desc->threads_active becomes |
1129 | * zero and the interrupt line is unmasked. See | 1124 | * zero and the interrupt line is unmasked. See |
1130 | * handle.c:irq_wake_thread() for further information. | 1125 | * handle.c:irq_wake_thread() for further information. |
1131 | * | 1126 | * |
1132 | * If no thread is woken by primary (hard irq context) | 1127 | * If no thread is woken by primary (hard irq context) |
1133 | * interrupt handlers, then desc->threads_active is | 1128 | * interrupt handlers, then desc->threads_active is |
1134 | * also checked for zero to unmask the irq line in the | 1129 | * also checked for zero to unmask the irq line in the |
1135 | * affected hard irq flow handlers | 1130 | * affected hard irq flow handlers |
1136 | * (handle_[fasteoi|level]_irq). | 1131 | * (handle_[fasteoi|level]_irq). |
1137 | * | 1132 | * |
1138 | * The new action gets the first zero bit of | 1133 | * The new action gets the first zero bit of |
1139 | * thread_mask assigned. See the loop above which or's | 1134 | * thread_mask assigned. See the loop above which or's |
1140 | * all existing action->thread_mask bits. | 1135 | * all existing action->thread_mask bits. |
1141 | */ | 1136 | */ |
1142 | new->thread_mask = 1 << ffz(thread_mask); | 1137 | new->thread_mask = 1 << ffz(thread_mask); |
1143 | 1138 | ||
1144 | } else if (new->handler == irq_default_primary_handler && | 1139 | } else if (new->handler == irq_default_primary_handler && |
1145 | !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { | 1140 | !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { |
1146 | /* | 1141 | /* |
1147 | * The interrupt was requested with handler = NULL, so | 1142 | * The interrupt was requested with handler = NULL, so |
1148 | * we use the default primary handler for it. But it | 1143 | * we use the default primary handler for it. But it |
1149 | * does not have the oneshot flag set. In combination | 1144 | * does not have the oneshot flag set. In combination |
1150 | * with level interrupts this is deadly, because the | 1145 | * with level interrupts this is deadly, because the |
1151 | * default primary handler just wakes the thread, then | 1146 | * default primary handler just wakes the thread, then |
1152 | * the irq lines is reenabled, but the device still | 1147 | * the irq lines is reenabled, but the device still |
1153 | * has the level irq asserted. Rinse and repeat.... | 1148 | * has the level irq asserted. Rinse and repeat.... |
1154 | * | 1149 | * |
1155 | * While this works for edge type interrupts, we play | 1150 | * While this works for edge type interrupts, we play |
1156 | * it safe and reject unconditionally because we can't | 1151 | * it safe and reject unconditionally because we can't |
1157 | * say for sure which type this interrupt really | 1152 | * say for sure which type this interrupt really |
1158 | * has. The type flags are unreliable as the | 1153 | * has. The type flags are unreliable as the |
1159 | * underlying chip implementation can override them. | 1154 | * underlying chip implementation can override them. |
1160 | */ | 1155 | */ |
1161 | pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", | 1156 | pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", |
1162 | irq); | 1157 | irq); |
1163 | ret = -EINVAL; | 1158 | ret = -EINVAL; |
1164 | goto out_mask; | 1159 | goto out_mask; |
1165 | } | 1160 | } |
1166 | 1161 | ||
1167 | if (!shared) { | 1162 | if (!shared) { |
1168 | ret = irq_request_resources(desc); | 1163 | ret = irq_request_resources(desc); |
1169 | if (ret) { | 1164 | if (ret) { |
1170 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", | 1165 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", |
1171 | new->name, irq, desc->irq_data.chip->name); | 1166 | new->name, irq, desc->irq_data.chip->name); |
1172 | goto out_mask; | 1167 | goto out_mask; |
1173 | } | 1168 | } |
1174 | 1169 | ||
1175 | init_waitqueue_head(&desc->wait_for_threads); | 1170 | init_waitqueue_head(&desc->wait_for_threads); |
1176 | 1171 | ||
1177 | /* Setup the type (level, edge polarity) if configured: */ | 1172 | /* Setup the type (level, edge polarity) if configured: */ |
1178 | if (new->flags & IRQF_TRIGGER_MASK) { | 1173 | if (new->flags & IRQF_TRIGGER_MASK) { |
1179 | ret = __irq_set_trigger(desc, irq, | 1174 | ret = __irq_set_trigger(desc, irq, |
1180 | new->flags & IRQF_TRIGGER_MASK); | 1175 | new->flags & IRQF_TRIGGER_MASK); |
1181 | 1176 | ||
1182 | if (ret) | 1177 | if (ret) |
1183 | goto out_mask; | 1178 | goto out_mask; |
1184 | } | 1179 | } |
1185 | 1180 | ||
1186 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ | 1181 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
1187 | IRQS_ONESHOT | IRQS_WAITING); | 1182 | IRQS_ONESHOT | IRQS_WAITING); |
1188 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); | 1183 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
1189 | 1184 | ||
1190 | if (new->flags & IRQF_PERCPU) { | 1185 | if (new->flags & IRQF_PERCPU) { |
1191 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | 1186 | irqd_set(&desc->irq_data, IRQD_PER_CPU); |
1192 | irq_settings_set_per_cpu(desc); | 1187 | irq_settings_set_per_cpu(desc); |
1193 | } | 1188 | } |
1194 | 1189 | ||
1195 | if (new->flags & IRQF_ONESHOT) | 1190 | if (new->flags & IRQF_ONESHOT) |
1196 | desc->istate |= IRQS_ONESHOT; | 1191 | desc->istate |= IRQS_ONESHOT; |
1197 | 1192 | ||
1198 | if (irq_settings_can_autoenable(desc)) | 1193 | if (irq_settings_can_autoenable(desc)) |
1199 | irq_startup(desc, true); | 1194 | irq_startup(desc, true); |
1200 | else | 1195 | else |
1201 | /* Undo nested disables: */ | 1196 | /* Undo nested disables: */ |
1202 | desc->depth = 1; | 1197 | desc->depth = 1; |
1203 | 1198 | ||
1204 | /* Exclude IRQ from balancing if requested */ | 1199 | /* Exclude IRQ from balancing if requested */ |
1205 | if (new->flags & IRQF_NOBALANCING) { | 1200 | if (new->flags & IRQF_NOBALANCING) { |
1206 | irq_settings_set_no_balancing(desc); | 1201 | irq_settings_set_no_balancing(desc); |
1207 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | 1202 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); |
1208 | } | 1203 | } |
1209 | 1204 | ||
1210 | /* Set default affinity mask once everything is setup */ | 1205 | /* Set default affinity mask once everything is setup */ |
1211 | setup_affinity(irq, desc, mask); | 1206 | setup_affinity(irq, desc, mask); |
1212 | 1207 | ||
1213 | } else if (new->flags & IRQF_TRIGGER_MASK) { | 1208 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
1214 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; | 1209 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; |
1215 | unsigned int omsk = irq_settings_get_trigger_mask(desc); | 1210 | unsigned int omsk = irq_settings_get_trigger_mask(desc); |
1216 | 1211 | ||
1217 | if (nmsk != omsk) | 1212 | if (nmsk != omsk) |
1218 | /* hope the handler works with current trigger mode */ | 1213 | /* hope the handler works with current trigger mode */ |
1219 | pr_warning("irq %d uses trigger mode %u; requested %u\n", | 1214 | pr_warning("irq %d uses trigger mode %u; requested %u\n", |
1220 | irq, nmsk, omsk); | 1215 | irq, nmsk, omsk); |
1221 | } | 1216 | } |
1222 | 1217 | ||
1223 | new->irq = irq; | 1218 | new->irq = irq; |
1224 | *old_ptr = new; | 1219 | *old_ptr = new; |
1225 | 1220 | ||
1226 | /* Reset broken irq detection when installing new handler */ | 1221 | /* Reset broken irq detection when installing new handler */ |
1227 | desc->irq_count = 0; | 1222 | desc->irq_count = 0; |
1228 | desc->irqs_unhandled = 0; | 1223 | desc->irqs_unhandled = 0; |
1229 | 1224 | ||
1230 | /* | 1225 | /* |
1231 | * Check whether we disabled the irq via the spurious handler | 1226 | * Check whether we disabled the irq via the spurious handler |
1232 | * before. Reenable it and give it another chance. | 1227 | * before. Reenable it and give it another chance. |
1233 | */ | 1228 | */ |
1234 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { | 1229 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
1235 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; | 1230 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; |
1236 | __enable_irq(desc, irq, false); | 1231 | __enable_irq(desc, irq, false); |
1237 | } | 1232 | } |
1238 | 1233 | ||
1239 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1234 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1240 | 1235 | ||
1241 | /* | 1236 | /* |
1242 | * Strictly no need to wake it up, but hung_task complains | 1237 | * Strictly no need to wake it up, but hung_task complains |
1243 | * when no hard interrupt wakes the thread up. | 1238 | * when no hard interrupt wakes the thread up. |
1244 | */ | 1239 | */ |
1245 | if (new->thread) | 1240 | if (new->thread) |
1246 | wake_up_process(new->thread); | 1241 | wake_up_process(new->thread); |
1247 | 1242 | ||
1248 | register_irq_proc(irq, desc); | 1243 | register_irq_proc(irq, desc); |
1249 | new->dir = NULL; | 1244 | new->dir = NULL; |
1250 | register_handler_proc(irq, new); | 1245 | register_handler_proc(irq, new); |
1251 | free_cpumask_var(mask); | 1246 | free_cpumask_var(mask); |
1252 | 1247 | ||
1253 | return 0; | 1248 | return 0; |
1254 | 1249 | ||
1255 | mismatch: | 1250 | mismatch: |
1256 | if (!(new->flags & IRQF_PROBE_SHARED)) { | 1251 | if (!(new->flags & IRQF_PROBE_SHARED)) { |
1257 | pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", | 1252 | pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", |
1258 | irq, new->flags, new->name, old->flags, old->name); | 1253 | irq, new->flags, new->name, old->flags, old->name); |
1259 | #ifdef CONFIG_DEBUG_SHIRQ | 1254 | #ifdef CONFIG_DEBUG_SHIRQ |
1260 | dump_stack(); | 1255 | dump_stack(); |
1261 | #endif | 1256 | #endif |
1262 | } | 1257 | } |
1263 | ret = -EBUSY; | 1258 | ret = -EBUSY; |
1264 | 1259 | ||
1265 | out_mask: | 1260 | out_mask: |
1266 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1261 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1267 | free_cpumask_var(mask); | 1262 | free_cpumask_var(mask); |
1268 | 1263 | ||
1269 | out_thread: | 1264 | out_thread: |
1270 | if (new->thread) { | 1265 | if (new->thread) { |
1271 | struct task_struct *t = new->thread; | 1266 | struct task_struct *t = new->thread; |
1272 | 1267 | ||
1273 | new->thread = NULL; | 1268 | new->thread = NULL; |
1274 | kthread_stop(t); | 1269 | kthread_stop(t); |
1275 | put_task_struct(t); | 1270 | put_task_struct(t); |
1276 | } | 1271 | } |
1277 | out_mput: | 1272 | out_mput: |
1278 | module_put(desc->owner); | 1273 | module_put(desc->owner); |
1279 | return ret; | 1274 | return ret; |
1280 | } | 1275 | } |
1281 | 1276 | ||
1282 | /** | 1277 | /** |
1283 | * setup_irq - setup an interrupt | 1278 | * setup_irq - setup an interrupt |
1284 | * @irq: Interrupt line to setup | 1279 | * @irq: Interrupt line to setup |
1285 | * @act: irqaction for the interrupt | 1280 | * @act: irqaction for the interrupt |
1286 | * | 1281 | * |
1287 | * Used to statically setup interrupts in the early boot process. | 1282 | * Used to statically setup interrupts in the early boot process. |
1288 | */ | 1283 | */ |
1289 | int setup_irq(unsigned int irq, struct irqaction *act) | 1284 | int setup_irq(unsigned int irq, struct irqaction *act) |
1290 | { | 1285 | { |
1291 | int retval; | 1286 | int retval; |
1292 | struct irq_desc *desc = irq_to_desc(irq); | 1287 | struct irq_desc *desc = irq_to_desc(irq); |
1293 | 1288 | ||
1294 | if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 1289 | if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
1295 | return -EINVAL; | 1290 | return -EINVAL; |
1296 | chip_bus_lock(desc); | 1291 | chip_bus_lock(desc); |
1297 | retval = __setup_irq(irq, desc, act); | 1292 | retval = __setup_irq(irq, desc, act); |
1298 | chip_bus_sync_unlock(desc); | 1293 | chip_bus_sync_unlock(desc); |
1299 | 1294 | ||
1300 | return retval; | 1295 | return retval; |
1301 | } | 1296 | } |
1302 | EXPORT_SYMBOL_GPL(setup_irq); | 1297 | EXPORT_SYMBOL_GPL(setup_irq); |
1303 | 1298 | ||
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 *
 * Returns the detached irqaction (caller owns it and may kfree() it)
 * or NULL when no action with a matching dev_id is installed. Takes
 * desc->lock internally and may sleep (synchronize_irq, kthread_stop),
 * hence the in_interrupt() warning below.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			/* Reached end of chain without a dev_id match */
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in * parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/* Stop the irq thread and drop the task_struct reference taken
	 * at __setup_irq() time. */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}
1387 | 1382 | ||
1388 | /** | 1383 | /** |
1389 | * remove_irq - free an interrupt | 1384 | * remove_irq - free an interrupt |
1390 | * @irq: Interrupt line to free | 1385 | * @irq: Interrupt line to free |
1391 | * @act: irqaction for the interrupt | 1386 | * @act: irqaction for the interrupt |
1392 | * | 1387 | * |
1393 | * Used to remove interrupts statically setup by the early boot process. | 1388 | * Used to remove interrupts statically setup by the early boot process. |
1394 | */ | 1389 | */ |
1395 | void remove_irq(unsigned int irq, struct irqaction *act) | 1390 | void remove_irq(unsigned int irq, struct irqaction *act) |
1396 | { | 1391 | { |
1397 | struct irq_desc *desc = irq_to_desc(irq); | 1392 | struct irq_desc *desc = irq_to_desc(irq); |
1398 | 1393 | ||
1399 | if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 1394 | if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
1400 | __free_irq(irq, act->dev_id); | 1395 | __free_irq(irq, act->dev_id); |
1401 | } | 1396 | } |
1402 | EXPORT_SYMBOL_GPL(remove_irq); | 1397 | EXPORT_SYMBOL_GPL(remove_irq); |
1403 | 1398 | ||
/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;

	if (!desc)
		return;
	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	/* A still registered affinity notifier at this point is a bug */
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	action = __free_irq(irq, dev_id);
	kfree(action);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
1435 | 1430 | ||
1436 | /** | 1431 | /** |
1437 | * request_threaded_irq - allocate an interrupt line | 1432 | * request_threaded_irq - allocate an interrupt line |
1438 | * @irq: Interrupt line to allocate | 1433 | * @irq: Interrupt line to allocate |
1439 | * @handler: Function to be called when the IRQ occurs. | 1434 | * @handler: Function to be called when the IRQ occurs. |
1440 | * Primary handler for threaded interrupts | 1435 | * Primary handler for threaded interrupts |
1441 | * If NULL and thread_fn != NULL the default | 1436 | * If NULL and thread_fn != NULL the default |
1442 | * primary handler is installed | 1437 | * primary handler is installed |
1443 | * @thread_fn: Function called from the irq handler thread | 1438 | * @thread_fn: Function called from the irq handler thread |
1444 | * If NULL, no irq thread is created | 1439 | * If NULL, no irq thread is created |
1445 | * @irqflags: Interrupt type flags | 1440 | * @irqflags: Interrupt type flags |
1446 | * @devname: An ascii name for the claiming device | 1441 | * @devname: An ascii name for the claiming device |
1447 | * @dev_id: A cookie passed back to the handler function | 1442 | * @dev_id: A cookie passed back to the handler function |
1448 | * | 1443 | * |
1449 | * This call allocates interrupt resources and enables the | 1444 | * This call allocates interrupt resources and enables the |
1450 | * interrupt line and IRQ handling. From the point this | 1445 | * interrupt line and IRQ handling. From the point this |
1451 | * call is made your handler function may be invoked. Since | 1446 | * call is made your handler function may be invoked. Since |
1452 | * your handler function must clear any interrupt the board | 1447 | * your handler function must clear any interrupt the board |
1453 | * raises, you must take care both to initialise your hardware | 1448 | * raises, you must take care both to initialise your hardware |
1454 | * and to set up the interrupt handler in the right order. | 1449 | * and to set up the interrupt handler in the right order. |
1455 | * | 1450 | * |
1456 | * If you want to set up a threaded irq handler for your device | 1451 | * If you want to set up a threaded irq handler for your device |
1457 | * then you need to supply @handler and @thread_fn. @handler is | 1452 | * then you need to supply @handler and @thread_fn. @handler is |
1458 | * still called in hard interrupt context and has to check | 1453 | * still called in hard interrupt context and has to check |
1459 | * whether the interrupt originates from the device. If yes it | 1454 | * whether the interrupt originates from the device. If yes it |
1460 | * needs to disable the interrupt on the device and return | 1455 | * needs to disable the interrupt on the device and return |
1461 | * IRQ_WAKE_THREAD which will wake up the handler thread and run | 1456 | * IRQ_WAKE_THREAD which will wake up the handler thread and run |
1462 | * @thread_fn. This split handler design is necessary to support | 1457 | * @thread_fn. This split handler design is necessary to support |
1463 | * shared interrupts. | 1458 | * shared interrupts. |
1464 | * | 1459 | * |
1465 | * Dev_id must be globally unique. Normally the address of the | 1460 | * Dev_id must be globally unique. Normally the address of the |
1466 | * device data structure is used as the cookie. Since the handler | 1461 | * device data structure is used as the cookie. Since the handler |
1467 | * receives this value it makes sense to use it. | 1462 | * receives this value it makes sense to use it. |
1468 | * | 1463 | * |
1469 | * If your interrupt is shared you must pass a non NULL dev_id | 1464 | * If your interrupt is shared you must pass a non NULL dev_id |
1470 | * as this is required when freeing the interrupt. | 1465 | * as this is required when freeing the interrupt. |
1471 | * | 1466 | * |
1472 | * Flags: | 1467 | * Flags: |
1473 | * | 1468 | * |
1474 | * IRQF_SHARED Interrupt is shared | 1469 | * IRQF_SHARED Interrupt is shared |
1475 | * IRQF_TRIGGER_* Specify active edge(s) or level | 1470 | * IRQF_TRIGGER_* Specify active edge(s) or level |
1476 | * | 1471 | * |
1477 | */ | 1472 | */ |
1478 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, | 1473 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, |
1479 | irq_handler_t thread_fn, unsigned long irqflags, | 1474 | irq_handler_t thread_fn, unsigned long irqflags, |
1480 | const char *devname, void *dev_id) | 1475 | const char *devname, void *dev_id) |
1481 | { | 1476 | { |
1482 | struct irqaction *action; | 1477 | struct irqaction *action; |
1483 | struct irq_desc *desc; | 1478 | struct irq_desc *desc; |
1484 | int retval; | 1479 | int retval; |
1485 | 1480 | ||
1486 | /* | 1481 | /* |
1487 | * Sanity-check: shared interrupts must pass in a real dev-ID, | 1482 | * Sanity-check: shared interrupts must pass in a real dev-ID, |
1488 | * otherwise we'll have trouble later trying to figure out | 1483 | * otherwise we'll have trouble later trying to figure out |
1489 | * which interrupt is which (messes up the interrupt freeing | 1484 | * which interrupt is which (messes up the interrupt freeing |
1490 | * logic etc). | 1485 | * logic etc). |
1491 | */ | 1486 | */ |
1492 | if ((irqflags & IRQF_SHARED) && !dev_id) | 1487 | if ((irqflags & IRQF_SHARED) && !dev_id) |
1493 | return -EINVAL; | 1488 | return -EINVAL; |
1494 | 1489 | ||
1495 | desc = irq_to_desc(irq); | 1490 | desc = irq_to_desc(irq); |
1496 | if (!desc) | 1491 | if (!desc) |
1497 | return -EINVAL; | 1492 | return -EINVAL; |
1498 | 1493 | ||
1499 | if (!irq_settings_can_request(desc) || | 1494 | if (!irq_settings_can_request(desc) || |
1500 | WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 1495 | WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
1501 | return -EINVAL; | 1496 | return -EINVAL; |
1502 | 1497 | ||
1503 | if (!handler) { | 1498 | if (!handler) { |
1504 | if (!thread_fn) | 1499 | if (!thread_fn) |
1505 | return -EINVAL; | 1500 | return -EINVAL; |
1506 | handler = irq_default_primary_handler; | 1501 | handler = irq_default_primary_handler; |
1507 | } | 1502 | } |
1508 | 1503 | ||
1509 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | 1504 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
1510 | if (!action) | 1505 | if (!action) |
1511 | return -ENOMEM; | 1506 | return -ENOMEM; |
1512 | 1507 | ||
1513 | action->handler = handler; | 1508 | action->handler = handler; |
1514 | action->thread_fn = thread_fn; | 1509 | action->thread_fn = thread_fn; |
1515 | action->flags = irqflags; | 1510 | action->flags = irqflags; |
1516 | action->name = devname; | 1511 | action->name = devname; |
1517 | action->dev_id = dev_id; | 1512 | action->dev_id = dev_id; |
1518 | 1513 | ||
1519 | chip_bus_lock(desc); | 1514 | chip_bus_lock(desc); |
1520 | retval = __setup_irq(irq, desc, action); | 1515 | retval = __setup_irq(irq, desc, action); |
1521 | chip_bus_sync_unlock(desc); | 1516 | chip_bus_sync_unlock(desc); |
1522 | 1517 | ||
1523 | if (retval) | 1518 | if (retval) |
1524 | kfree(action); | 1519 | kfree(action); |
1525 | 1520 | ||
1526 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME | 1521 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
1527 | if (!retval && (irqflags & IRQF_SHARED)) { | 1522 | if (!retval && (irqflags & IRQF_SHARED)) { |
1528 | /* | 1523 | /* |
1529 | * It's a shared IRQ -- the driver ought to be prepared for it | 1524 | * It's a shared IRQ -- the driver ought to be prepared for it |
1530 | * to happen immediately, so let's make sure.... | 1525 | * to happen immediately, so let's make sure.... |
1531 | * We disable the irq to make sure that a 'real' IRQ doesn't | 1526 | * We disable the irq to make sure that a 'real' IRQ doesn't |
1532 | * run in parallel with our fake. | 1527 | * run in parallel with our fake. |
1533 | */ | 1528 | */ |
1534 | unsigned long flags; | 1529 | unsigned long flags; |
1535 | 1530 | ||
1536 | disable_irq(irq); | 1531 | disable_irq(irq); |
1537 | local_irq_save(flags); | 1532 | local_irq_save(flags); |
1538 | 1533 | ||
1539 | handler(irq, dev_id); | 1534 | handler(irq, dev_id); |
1540 | 1535 | ||
1541 | local_irq_restore(flags); | 1536 | local_irq_restore(flags); |
1542 | enable_irq(irq); | 1537 | enable_irq(irq); |
1543 | } | 1538 | } |
1544 | #endif | 1539 | #endif |
1545 | return retval; | 1540 | return retval; |
1546 | } | 1541 | } |
1547 | EXPORT_SYMBOL(request_threaded_irq); | 1542 | EXPORT_SYMBOL(request_threaded_irq); |
1548 | 1543 | ||
1549 | /** | 1544 | /** |
1550 | * request_any_context_irq - allocate an interrupt line | 1545 | * request_any_context_irq - allocate an interrupt line |
1551 | * @irq: Interrupt line to allocate | 1546 | * @irq: Interrupt line to allocate |
1552 | * @handler: Function to be called when the IRQ occurs. | 1547 | * @handler: Function to be called when the IRQ occurs. |
1553 | * Threaded handler for threaded interrupts. | 1548 | * Threaded handler for threaded interrupts. |
1554 | * @flags: Interrupt type flags | 1549 | * @flags: Interrupt type flags |
1555 | * @name: An ascii name for the claiming device | 1550 | * @name: An ascii name for the claiming device |
1556 | * @dev_id: A cookie passed back to the handler function | 1551 | * @dev_id: A cookie passed back to the handler function |
1557 | * | 1552 | * |
1558 | * This call allocates interrupt resources and enables the | 1553 | * This call allocates interrupt resources and enables the |
1559 | * interrupt line and IRQ handling. It selects either a | 1554 | * interrupt line and IRQ handling. It selects either a |
1560 | * hardirq or threaded handling method depending on the | 1555 | * hardirq or threaded handling method depending on the |
1561 | * context. | 1556 | * context. |
1562 | * | 1557 | * |
1563 | * On failure, it returns a negative value. On success, | 1558 | * On failure, it returns a negative value. On success, |
1564 | * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. | 1559 | * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. |
1565 | */ | 1560 | */ |
1566 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, | 1561 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, |
1567 | unsigned long flags, const char *name, void *dev_id) | 1562 | unsigned long flags, const char *name, void *dev_id) |
1568 | { | 1563 | { |
1569 | struct irq_desc *desc = irq_to_desc(irq); | 1564 | struct irq_desc *desc = irq_to_desc(irq); |
1570 | int ret; | 1565 | int ret; |
1571 | 1566 | ||
1572 | if (!desc) | 1567 | if (!desc) |
1573 | return -EINVAL; | 1568 | return -EINVAL; |
1574 | 1569 | ||
1575 | if (irq_settings_is_nested_thread(desc)) { | 1570 | if (irq_settings_is_nested_thread(desc)) { |
1576 | ret = request_threaded_irq(irq, NULL, handler, | 1571 | ret = request_threaded_irq(irq, NULL, handler, |
1577 | flags, name, dev_id); | 1572 | flags, name, dev_id); |
1578 | return !ret ? IRQC_IS_NESTED : ret; | 1573 | return !ret ? IRQC_IS_NESTED : ret; |
1579 | } | 1574 | } |
1580 | 1575 | ||
1581 | ret = request_irq(irq, handler, flags, name, dev_id); | 1576 | ret = request_irq(irq, handler, flags, name, dev_id); |
1582 | return !ret ? IRQC_IS_HARDIRQ : ret; | 1577 | return !ret ? IRQC_IS_HARDIRQ : ret; |
1583 | } | 1578 | } |
1584 | EXPORT_SYMBOL_GPL(request_any_context_irq); | 1579 | EXPORT_SYMBOL_GPL(request_any_context_irq); |
1585 | 1580 | ||
1586 | void enable_percpu_irq(unsigned int irq, unsigned int type) | 1581 | void enable_percpu_irq(unsigned int irq, unsigned int type) |
1587 | { | 1582 | { |
1588 | unsigned int cpu = smp_processor_id(); | 1583 | unsigned int cpu = smp_processor_id(); |
1589 | unsigned long flags; | 1584 | unsigned long flags; |
1590 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | 1585 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); |
1591 | 1586 | ||
1592 | if (!desc) | 1587 | if (!desc) |
1593 | return; | 1588 | return; |
1594 | 1589 | ||
1595 | type &= IRQ_TYPE_SENSE_MASK; | 1590 | type &= IRQ_TYPE_SENSE_MASK; |
1596 | if (type != IRQ_TYPE_NONE) { | 1591 | if (type != IRQ_TYPE_NONE) { |
1597 | int ret; | 1592 | int ret; |
1598 | 1593 | ||
1599 | ret = __irq_set_trigger(desc, irq, type); | 1594 | ret = __irq_set_trigger(desc, irq, type); |
1600 | 1595 | ||
1601 | if (ret) { | 1596 | if (ret) { |
1602 | WARN(1, "failed to set type for IRQ%d\n", irq); | 1597 | WARN(1, "failed to set type for IRQ%d\n", irq); |
1603 | goto out; | 1598 | goto out; |
1604 | } | 1599 | } |
1605 | } | 1600 | } |
1606 | 1601 | ||
1607 | irq_percpu_enable(desc, cpu); | 1602 | irq_percpu_enable(desc, cpu); |
1608 | out: | 1603 | out: |
1609 | irq_put_desc_unlock(desc, flags); | 1604 | irq_put_desc_unlock(desc, flags); |
1610 | } | 1605 | } |
1611 | EXPORT_SYMBOL_GPL(enable_percpu_irq); | 1606 | EXPORT_SYMBOL_GPL(enable_percpu_irq); |
1612 | 1607 | ||
1613 | void disable_percpu_irq(unsigned int irq) | 1608 | void disable_percpu_irq(unsigned int irq) |
1614 | { | 1609 | { |
1615 | unsigned int cpu = smp_processor_id(); | 1610 | unsigned int cpu = smp_processor_id(); |
1616 | unsigned long flags; | 1611 | unsigned long flags; |
1617 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | 1612 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); |
1618 | 1613 | ||
1619 | if (!desc) | 1614 | if (!desc) |
1620 | return; | 1615 | return; |
1621 | 1616 | ||
1622 | irq_percpu_disable(desc, cpu); | 1617 | irq_percpu_disable(desc, cpu); |
1623 | irq_put_desc_unlock(desc, flags); | 1618 | irq_put_desc_unlock(desc, flags); |
1624 | } | 1619 | } |
1625 | EXPORT_SYMBOL_GPL(disable_percpu_irq); | 1620 | EXPORT_SYMBOL_GPL(disable_percpu_irq); |
1626 | 1621 | ||
1627 | /* | 1622 | /* |
1628 | * Internal function to unregister a percpu irqaction. | 1623 | * Internal function to unregister a percpu irqaction. |
1629 | */ | 1624 | */ |
1630 | static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) | 1625 | static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) |
1631 | { | 1626 | { |
1632 | struct irq_desc *desc = irq_to_desc(irq); | 1627 | struct irq_desc *desc = irq_to_desc(irq); |
1633 | struct irqaction *action; | 1628 | struct irqaction *action; |
1634 | unsigned long flags; | 1629 | unsigned long flags; |
1635 | 1630 | ||
1636 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | 1631 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
1637 | 1632 | ||
1638 | if (!desc) | 1633 | if (!desc) |
1639 | return NULL; | 1634 | return NULL; |
1640 | 1635 | ||
1641 | raw_spin_lock_irqsave(&desc->lock, flags); | 1636 | raw_spin_lock_irqsave(&desc->lock, flags); |
1642 | 1637 | ||
1643 | action = desc->action; | 1638 | action = desc->action; |
1644 | if (!action || action->percpu_dev_id != dev_id) { | 1639 | if (!action || action->percpu_dev_id != dev_id) { |
1645 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | 1640 | WARN(1, "Trying to free already-free IRQ %d\n", irq); |
1646 | goto bad; | 1641 | goto bad; |
1647 | } | 1642 | } |
1648 | 1643 | ||
1649 | if (!cpumask_empty(desc->percpu_enabled)) { | 1644 | if (!cpumask_empty(desc->percpu_enabled)) { |
1650 | WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", | 1645 | WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", |
1651 | irq, cpumask_first(desc->percpu_enabled)); | 1646 | irq, cpumask_first(desc->percpu_enabled)); |
1652 | goto bad; | 1647 | goto bad; |
1653 | } | 1648 | } |
1654 | 1649 | ||
1655 | /* Found it - now remove it from the list of entries: */ | 1650 | /* Found it - now remove it from the list of entries: */ |
1656 | desc->action = NULL; | 1651 | desc->action = NULL; |
1657 | 1652 | ||
1658 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1653 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1659 | 1654 | ||
1660 | unregister_handler_proc(irq, action); | 1655 | unregister_handler_proc(irq, action); |
1661 | 1656 | ||
1662 | module_put(desc->owner); | 1657 | module_put(desc->owner); |
1663 | return action; | 1658 | return action; |
1664 | 1659 | ||
1665 | bad: | 1660 | bad: |
1666 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1661 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1667 | return NULL; | 1662 | return NULL; |
1668 | } | 1663 | } |
1669 | 1664 | ||
1670 | /** | 1665 | /** |
1671 | * remove_percpu_irq - free a per-cpu interrupt | 1666 | * remove_percpu_irq - free a per-cpu interrupt |
1672 | * @irq: Interrupt line to free | 1667 | * @irq: Interrupt line to free |
1673 | * @act: irqaction for the interrupt | 1668 | * @act: irqaction for the interrupt |
1674 | * | 1669 | * |
1675 | * Used to remove interrupts statically setup by the early boot process. | 1670 | * Used to remove interrupts statically setup by the early boot process. |
1676 | */ | 1671 | */ |
1677 | void remove_percpu_irq(unsigned int irq, struct irqaction *act) | 1672 | void remove_percpu_irq(unsigned int irq, struct irqaction *act) |
1678 | { | 1673 | { |
1679 | struct irq_desc *desc = irq_to_desc(irq); | 1674 | struct irq_desc *desc = irq_to_desc(irq); |
1680 | 1675 | ||
1681 | if (desc && irq_settings_is_per_cpu_devid(desc)) | 1676 | if (desc && irq_settings_is_per_cpu_devid(desc)) |
1682 | __free_percpu_irq(irq, act->percpu_dev_id); | 1677 | __free_percpu_irq(irq, act->percpu_dev_id); |
1683 | } | 1678 | } |
1684 | 1679 | ||
1685 | /** | 1680 | /** |
1686 | * free_percpu_irq - free an interrupt allocated with request_percpu_irq | 1681 | * free_percpu_irq - free an interrupt allocated with request_percpu_irq |
1687 | * @irq: Interrupt line to free | 1682 | * @irq: Interrupt line to free |
1688 | * @dev_id: Device identity to free | 1683 | * @dev_id: Device identity to free |
1689 | * | 1684 | * |
1690 | * Remove a percpu interrupt handler. The handler is removed, but | 1685 | * Remove a percpu interrupt handler. The handler is removed, but |
1691 | * the interrupt line is not disabled. This must be done on each | 1686 | * the interrupt line is not disabled. This must be done on each |
1692 | * CPU before calling this function. The function does not return | 1687 | * CPU before calling this function. The function does not return |
1693 | * until any executing interrupts for this IRQ have completed. | 1688 | * until any executing interrupts for this IRQ have completed. |
1694 | * | 1689 | * |
1695 | * This function must not be called from interrupt context. | 1690 | * This function must not be called from interrupt context. |
1696 | */ | 1691 | */ |
1697 | void free_percpu_irq(unsigned int irq, void __percpu *dev_id) | 1692 | void free_percpu_irq(unsigned int irq, void __percpu *dev_id) |
1698 | { | 1693 | { |
1699 | struct irq_desc *desc = irq_to_desc(irq); | 1694 | struct irq_desc *desc = irq_to_desc(irq); |
1700 | 1695 | ||
1701 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | 1696 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) |
1702 | return; | 1697 | return; |
1703 | 1698 | ||
1704 | chip_bus_lock(desc); | 1699 | chip_bus_lock(desc); |
1705 | kfree(__free_percpu_irq(irq, dev_id)); | 1700 | kfree(__free_percpu_irq(irq, dev_id)); |
1706 | chip_bus_sync_unlock(desc); | 1701 | chip_bus_sync_unlock(desc); |
1707 | } | 1702 | } |
1708 | 1703 | ||
1709 | /** | 1704 | /** |
1710 | * setup_percpu_irq - setup a per-cpu interrupt | 1705 | * setup_percpu_irq - setup a per-cpu interrupt |
1711 | * @irq: Interrupt line to setup | 1706 | * @irq: Interrupt line to setup |
1712 | * @act: irqaction for the interrupt | 1707 | * @act: irqaction for the interrupt |
1713 | * | 1708 | * |
1714 | * Used to statically setup per-cpu interrupts in the early boot process. | 1709 | * Used to statically setup per-cpu interrupts in the early boot process. |
1715 | */ | 1710 | */ |
1716 | int setup_percpu_irq(unsigned int irq, struct irqaction *act) | 1711 | int setup_percpu_irq(unsigned int irq, struct irqaction *act) |
1717 | { | 1712 | { |
1718 | struct irq_desc *desc = irq_to_desc(irq); | 1713 | struct irq_desc *desc = irq_to_desc(irq); |
1719 | int retval; | 1714 | int retval; |
1720 | 1715 | ||
1721 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | 1716 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) |
1722 | return -EINVAL; | 1717 | return -EINVAL; |
1723 | chip_bus_lock(desc); | 1718 | chip_bus_lock(desc); |
1724 | retval = __setup_irq(irq, desc, act); | 1719 | retval = __setup_irq(irq, desc, act); |
1725 | chip_bus_sync_unlock(desc); | 1720 | chip_bus_sync_unlock(desc); |
1726 | 1721 | ||
1727 | return retval; | 1722 | return retval; |
1728 | } | 1723 | } |
1729 | 1724 | ||
1730 | /** | 1725 | /** |
1731 | * request_percpu_irq - allocate a percpu interrupt line | 1726 | * request_percpu_irq - allocate a percpu interrupt line |
1732 | * @irq: Interrupt line to allocate | 1727 | * @irq: Interrupt line to allocate |
1733 | * @handler: Function to be called when the IRQ occurs. | 1728 | * @handler: Function to be called when the IRQ occurs. |
1734 | * @devname: An ascii name for the claiming device | 1729 | * @devname: An ascii name for the claiming device |
1735 | * @dev_id: A percpu cookie passed back to the handler function | 1730 | * @dev_id: A percpu cookie passed back to the handler function |
1736 | * | 1731 | * |
1737 | * This call allocates interrupt resources, but doesn't | 1732 | * This call allocates interrupt resources, but doesn't |
1738 | * automatically enable the interrupt. It has to be done on each | 1733 | * automatically enable the interrupt. It has to be done on each |
1739 | * CPU using enable_percpu_irq(). | 1734 | * CPU using enable_percpu_irq(). |
1740 | * | 1735 | * |
1741 | * Dev_id must be globally unique. It is a per-cpu variable, and | 1736 | * Dev_id must be globally unique. It is a per-cpu variable, and |
1742 | * the handler gets called with the interrupted CPU's instance of | 1737 | * the handler gets called with the interrupted CPU's instance of |
1743 | * that variable. | 1738 | * that variable. |
1744 | */ | 1739 | */ |
1745 | int request_percpu_irq(unsigned int irq, irq_handler_t handler, | 1740 | int request_percpu_irq(unsigned int irq, irq_handler_t handler, |
1746 | const char *devname, void __percpu *dev_id) | 1741 | const char *devname, void __percpu *dev_id) |
1747 | { | 1742 | { |
1748 | struct irqaction *action; | 1743 | struct irqaction *action; |
1749 | struct irq_desc *desc; | 1744 | struct irq_desc *desc; |
1750 | int retval; | 1745 | int retval; |
1751 | 1746 | ||
1752 | if (!dev_id) | 1747 | if (!dev_id) |
1753 | return -EINVAL; | 1748 | return -EINVAL; |
1754 | 1749 | ||
1755 | desc = irq_to_desc(irq); | 1750 | desc = irq_to_desc(irq); |
1756 | if (!desc || !irq_settings_can_request(desc) || | 1751 | if (!desc || !irq_settings_can_request(desc) || |
1757 | !irq_settings_is_per_cpu_devid(desc)) | 1752 | !irq_settings_is_per_cpu_devid(desc)) |
1758 | return -EINVAL; | 1753 | return -EINVAL; |
1759 | 1754 | ||
1760 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | 1755 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
1761 | if (!action) | 1756 | if (!action) |
1762 | return -ENOMEM; | 1757 | return -ENOMEM; |
1763 | 1758 | ||
1764 | action->handler = handler; | 1759 | action->handler = handler; |
1765 | action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND; | 1760 | action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND; |
1766 | action->name = devname; | 1761 | action->name = devname; |
1767 | action->percpu_dev_id = dev_id; | 1762 | action->percpu_dev_id = dev_id; |
1768 | 1763 | ||
1769 | chip_bus_lock(desc); | 1764 | chip_bus_lock(desc); |
1770 | retval = __setup_irq(irq, desc, action); | 1765 | retval = __setup_irq(irq, desc, action); |
1771 | chip_bus_sync_unlock(desc); | 1766 | chip_bus_sync_unlock(desc); |
1772 | 1767 | ||
1773 | if (retval) | 1768 | if (retval) |
1774 | kfree(action); | 1769 | kfree(action); |
1775 | 1770 | ||
1776 | return retval; | 1771 | return retval; |
1777 | } | 1772 | } |
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit 8f1410
-
mentioned in commit bae362
-
mentioned in commit 3e32c2
-
mentioned in commit 8f1410
-
mentioned in commit bae362
-
mentioned in commit 3e32c2
-
mentioned in commit 4d9feb
-
mentioned in commit 7c2fbe
-
mentioned in commit 4d9feb
-
mentioned in commit 7c2fbe
-
mentioned in commit 8f1410
-
mentioned in commit bae362
-
mentioned in commit 3e32c2
-
mentioned in commit 4d9feb
-
mentioned in commit 7c2fbe
-
mentioned in commit 8f1410
-
mentioned in commit bae362
-
mentioned in commit 3e32c2
-
mentioned in commit 4d9feb
-
mentioned in commit 8f1410
-
mentioned in commit 7c2fbe
-
mentioned in commit bae362
-
mentioned in commit 3e32c2
-
mentioned in commit 4d9feb
-
mentioned in commit 7c2fbe
-
mentioned in commit 4c88d7
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit 3d8afe
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit 4c88d7
-
mentioned in commit 601c94
-
mentioned in commit a04080
-
mentioned in commit 3d8afe
-
mentioned in commit a04080
-
mentioned in commit 3d8afe