Commit d9e9e8e2fe832180f5c8f659a63def2e8fcaea4a (exists in master and 13 other branches)
Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq fixes from Thomas Gleixner:
 "A slightly large fix for a subtle issue in the CPU hotplug code of
  certain ARM SoCs, where the not yet online cpu needs to set up the
  cpu local timer and needs to set the interrupt affinity to itself.
  Setting interrupt affinity to a not online cpu is prohibited, and
  therefore the timer interrupt ends up on the wrong cpu, which leads
  to nasty complications.

  The SoC folks tried to hack around that in the SoC code in some more
  than nasty ways. The proper solution is to have a way to enforce the
  affinity setting to a not online cpu. The core patch to the genirq
  code provides that facility and the follow-up patches make use of it
  in the GIC interrupt controller and the exynos timer driver.

  The change to the core code has no implications for existing users,
  except for the rename of the locked function and therefore the
  necessary fixup in mips/cavium. Aside from that, no runtime impact is
  possible, as none of the existing interrupt chips implements anything
  which depends on the force argument of the irq_set_affinity()
  callback"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  clocksource: exynos_mct: Register clock event after request_irq()
  clocksource: exynos_mct: Use irq_force_affinity() in cpu bringup
  irqchip: gic: Support forced affinity setting
  genirq: Allow forcing cpu affinity of interrupts
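The new facility is easiest to see from the driver side. Below is a minimal sketch of the pattern the exynos_mct patch adopts, not the exact driver code; the helper name example_local_timer_setup is made up for illustration. irq_force_affinity() passes force=true down to the irqchip's .irq_set_affinity() callback, bypassing the online-mask filtering that a plain irq_set_affinity() request goes through, so the not yet online cpu can claim its own timer interrupt:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Hypothetical bringup hook, running on behalf of the cpu coming online. */
static int example_local_timer_setup(unsigned int cpu, unsigned int irq)
{
	/*
	 * irq_set_affinity(irq, cpumask_of(cpu)) would be filtered
	 * against cpu_online_mask here, leaving the timer irq on the
	 * wrong cpu.  The new call enforces the affinity even though
	 * @cpu is not marked online yet.
	 */
	return irq_force_affinity(irq, cpumask_of(cpu));
}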
Showing 6 changed files
arch/mips/cavium-octeon/octeon-irq.c
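The only functional change in this file is in octeon_irq_cpu_offline_ciu(): the call to the renamed core helper gains the new force parameter (false here, since offlining migrates the irq to a cpu that is already online). For reference, the signatures as introduced by the genirq patch in this series (declared in the core irq headers) are:

int irq_set_affinity_locked(struct irq_data *data,
			    const struct cpumask *mask, bool force);
int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);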
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2004-2012 Cavium, Inc. | 6 | * Copyright (C) 2004-2012 Cavium, Inc. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/irqdomain.h> | 10 | #include <linux/irqdomain.h> |
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/irq.h> | 14 | #include <linux/irq.h> |
15 | #include <linux/smp.h> | 15 | #include <linux/smp.h> |
16 | #include <linux/of.h> | 16 | #include <linux/of.h> |
17 | 17 | ||
18 | #include <asm/octeon/octeon.h> | 18 | #include <asm/octeon/octeon.h> |
19 | #include <asm/octeon/cvmx-ciu2-defs.h> | 19 | #include <asm/octeon/cvmx-ciu2-defs.h> |
20 | 20 | ||
21 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); | 21 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); |
22 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); | 22 | static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); |
23 | static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); | 23 | static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); |
24 | 24 | ||
25 | static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; | 25 | static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; |
26 | 26 | ||
27 | union octeon_ciu_chip_data { | 27 | union octeon_ciu_chip_data { |
28 | void *p; | 28 | void *p; |
29 | unsigned long l; | 29 | unsigned long l; |
30 | struct { | 30 | struct { |
31 | unsigned long line:6; | 31 | unsigned long line:6; |
32 | unsigned long bit:6; | 32 | unsigned long bit:6; |
33 | unsigned long gpio_line:6; | 33 | unsigned long gpio_line:6; |
34 | } s; | 34 | } s; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | struct octeon_core_chip_data { | 37 | struct octeon_core_chip_data { |
38 | struct mutex core_irq_mutex; | 38 | struct mutex core_irq_mutex; |
39 | bool current_en; | 39 | bool current_en; |
40 | bool desired_en; | 40 | bool desired_en; |
41 | u8 bit; | 41 | u8 bit; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | #define MIPS_CORE_IRQ_LINES 8 | 44 | #define MIPS_CORE_IRQ_LINES 8 |
45 | 45 | ||
46 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; | 46 | static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; |
47 | 47 | ||
48 | static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, | 48 | static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, |
49 | struct irq_chip *chip, | 49 | struct irq_chip *chip, |
50 | irq_flow_handler_t handler) | 50 | irq_flow_handler_t handler) |
51 | { | 51 | { |
52 | union octeon_ciu_chip_data cd; | 52 | union octeon_ciu_chip_data cd; |
53 | 53 | ||
54 | irq_set_chip_and_handler(irq, chip, handler); | 54 | irq_set_chip_and_handler(irq, chip, handler); |
55 | 55 | ||
56 | cd.l = 0; | 56 | cd.l = 0; |
57 | cd.s.line = line; | 57 | cd.s.line = line; |
58 | cd.s.bit = bit; | 58 | cd.s.bit = bit; |
59 | cd.s.gpio_line = gpio_line; | 59 | cd.s.gpio_line = gpio_line; |
60 | 60 | ||
61 | irq_set_chip_data(irq, cd.p); | 61 | irq_set_chip_data(irq, cd.p); |
62 | octeon_irq_ciu_to_irq[line][bit] = irq; | 62 | octeon_irq_ciu_to_irq[line][bit] = irq; |
63 | } | 63 | } |
64 | 64 | ||
65 | static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, | 65 | static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, |
66 | int irq, int line, int bit) | 66 | int irq, int line, int bit) |
67 | { | 67 | { |
68 | irq_domain_associate(domain, irq, line << 6 | bit); | 68 | irq_domain_associate(domain, irq, line << 6 | bit); |
69 | } | 69 | } |
70 | 70 | ||
71 | static int octeon_coreid_for_cpu(int cpu) | 71 | static int octeon_coreid_for_cpu(int cpu) |
72 | { | 72 | { |
73 | #ifdef CONFIG_SMP | 73 | #ifdef CONFIG_SMP |
74 | return cpu_logical_map(cpu); | 74 | return cpu_logical_map(cpu); |
75 | #else | 75 | #else |
76 | return cvmx_get_core_num(); | 76 | return cvmx_get_core_num(); |
77 | #endif | 77 | #endif |
78 | } | 78 | } |
79 | 79 | ||
80 | static int octeon_cpu_for_coreid(int coreid) | 80 | static int octeon_cpu_for_coreid(int coreid) |
81 | { | 81 | { |
82 | #ifdef CONFIG_SMP | 82 | #ifdef CONFIG_SMP |
83 | return cpu_number_map(coreid); | 83 | return cpu_number_map(coreid); |
84 | #else | 84 | #else |
85 | return smp_processor_id(); | 85 | return smp_processor_id(); |
86 | #endif | 86 | #endif |
87 | } | 87 | } |
88 | 88 | ||
89 | static void octeon_irq_core_ack(struct irq_data *data) | 89 | static void octeon_irq_core_ack(struct irq_data *data) |
90 | { | 90 | { |
91 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 91 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
92 | unsigned int bit = cd->bit; | 92 | unsigned int bit = cd->bit; |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * We don't need to disable IRQs to make these atomic since | 95 | * We don't need to disable IRQs to make these atomic since |
96 | * they are already disabled earlier in the low level | 96 | * they are already disabled earlier in the low level |
97 | * interrupt code. | 97 | * interrupt code. |
98 | */ | 98 | */ |
99 | clear_c0_status(0x100 << bit); | 99 | clear_c0_status(0x100 << bit); |
100 | /* The two user interrupts must be cleared manually. */ | 100 | /* The two user interrupts must be cleared manually. */ |
101 | if (bit < 2) | 101 | if (bit < 2) |
102 | clear_c0_cause(0x100 << bit); | 102 | clear_c0_cause(0x100 << bit); |
103 | } | 103 | } |
104 | 104 | ||
105 | static void octeon_irq_core_eoi(struct irq_data *data) | 105 | static void octeon_irq_core_eoi(struct irq_data *data) |
106 | { | 106 | { |
107 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 107 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
108 | 108 | ||
109 | /* | 109 | /* |
110 | * We don't need to disable IRQs to make these atomic since | 110 | * We don't need to disable IRQs to make these atomic since |
111 | * they are already disabled earlier in the low level | 111 | * they are already disabled earlier in the low level |
112 | * interrupt code. | 112 | * interrupt code. |
113 | */ | 113 | */ |
114 | set_c0_status(0x100 << cd->bit); | 114 | set_c0_status(0x100 << cd->bit); |
115 | } | 115 | } |
116 | 116 | ||
117 | static void octeon_irq_core_set_enable_local(void *arg) | 117 | static void octeon_irq_core_set_enable_local(void *arg) |
118 | { | 118 | { |
119 | struct irq_data *data = arg; | 119 | struct irq_data *data = arg; |
120 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 120 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
121 | unsigned int mask = 0x100 << cd->bit; | 121 | unsigned int mask = 0x100 << cd->bit; |
122 | 122 | ||
123 | /* | 123 | /* |
124 | * Interrupts are already disabled, so these are atomic. | 124 | * Interrupts are already disabled, so these are atomic. |
125 | */ | 125 | */ |
126 | if (cd->desired_en) | 126 | if (cd->desired_en) |
127 | set_c0_status(mask); | 127 | set_c0_status(mask); |
128 | else | 128 | else |
129 | clear_c0_status(mask); | 129 | clear_c0_status(mask); |
130 | 130 | ||
131 | } | 131 | } |
132 | 132 | ||
133 | static void octeon_irq_core_disable(struct irq_data *data) | 133 | static void octeon_irq_core_disable(struct irq_data *data) |
134 | { | 134 | { |
135 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 135 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
136 | cd->desired_en = false; | 136 | cd->desired_en = false; |
137 | } | 137 | } |
138 | 138 | ||
139 | static void octeon_irq_core_enable(struct irq_data *data) | 139 | static void octeon_irq_core_enable(struct irq_data *data) |
140 | { | 140 | { |
141 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 141 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
142 | cd->desired_en = true; | 142 | cd->desired_en = true; |
143 | } | 143 | } |
144 | 144 | ||
145 | static void octeon_irq_core_bus_lock(struct irq_data *data) | 145 | static void octeon_irq_core_bus_lock(struct irq_data *data) |
146 | { | 146 | { |
147 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 147 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
148 | 148 | ||
149 | mutex_lock(&cd->core_irq_mutex); | 149 | mutex_lock(&cd->core_irq_mutex); |
150 | } | 150 | } |
151 | 151 | ||
152 | static void octeon_irq_core_bus_sync_unlock(struct irq_data *data) | 152 | static void octeon_irq_core_bus_sync_unlock(struct irq_data *data) |
153 | { | 153 | { |
154 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); | 154 | struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data); |
155 | 155 | ||
156 | if (cd->desired_en != cd->current_en) { | 156 | if (cd->desired_en != cd->current_en) { |
157 | on_each_cpu(octeon_irq_core_set_enable_local, data, 1); | 157 | on_each_cpu(octeon_irq_core_set_enable_local, data, 1); |
158 | 158 | ||
159 | cd->current_en = cd->desired_en; | 159 | cd->current_en = cd->desired_en; |
160 | } | 160 | } |
161 | 161 | ||
162 | mutex_unlock(&cd->core_irq_mutex); | 162 | mutex_unlock(&cd->core_irq_mutex); |
163 | } | 163 | } |
164 | 164 | ||
165 | static struct irq_chip octeon_irq_chip_core = { | 165 | static struct irq_chip octeon_irq_chip_core = { |
166 | .name = "Core", | 166 | .name = "Core", |
167 | .irq_enable = octeon_irq_core_enable, | 167 | .irq_enable = octeon_irq_core_enable, |
168 | .irq_disable = octeon_irq_core_disable, | 168 | .irq_disable = octeon_irq_core_disable, |
169 | .irq_ack = octeon_irq_core_ack, | 169 | .irq_ack = octeon_irq_core_ack, |
170 | .irq_eoi = octeon_irq_core_eoi, | 170 | .irq_eoi = octeon_irq_core_eoi, |
171 | .irq_bus_lock = octeon_irq_core_bus_lock, | 171 | .irq_bus_lock = octeon_irq_core_bus_lock, |
172 | .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock, | 172 | .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock, |
173 | 173 | ||
174 | .irq_cpu_online = octeon_irq_core_eoi, | 174 | .irq_cpu_online = octeon_irq_core_eoi, |
175 | .irq_cpu_offline = octeon_irq_core_ack, | 175 | .irq_cpu_offline = octeon_irq_core_ack, |
176 | .flags = IRQCHIP_ONOFFLINE_ENABLED, | 176 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
177 | }; | 177 | }; |
178 | 178 | ||
179 | static void __init octeon_irq_init_core(void) | 179 | static void __init octeon_irq_init_core(void) |
180 | { | 180 | { |
181 | int i; | 181 | int i; |
182 | int irq; | 182 | int irq; |
183 | struct octeon_core_chip_data *cd; | 183 | struct octeon_core_chip_data *cd; |
184 | 184 | ||
185 | for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) { | 185 | for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) { |
186 | cd = &octeon_irq_core_chip_data[i]; | 186 | cd = &octeon_irq_core_chip_data[i]; |
187 | cd->current_en = false; | 187 | cd->current_en = false; |
188 | cd->desired_en = false; | 188 | cd->desired_en = false; |
189 | cd->bit = i; | 189 | cd->bit = i; |
190 | mutex_init(&cd->core_irq_mutex); | 190 | mutex_init(&cd->core_irq_mutex); |
191 | 191 | ||
192 | irq = OCTEON_IRQ_SW0 + i; | 192 | irq = OCTEON_IRQ_SW0 + i; |
193 | irq_set_chip_data(irq, cd); | 193 | irq_set_chip_data(irq, cd); |
194 | irq_set_chip_and_handler(irq, &octeon_irq_chip_core, | 194 | irq_set_chip_and_handler(irq, &octeon_irq_chip_core, |
195 | handle_percpu_irq); | 195 | handle_percpu_irq); |
196 | } | 196 | } |
197 | } | 197 | } |
198 | 198 | ||
199 | static int next_cpu_for_irq(struct irq_data *data) | 199 | static int next_cpu_for_irq(struct irq_data *data) |
200 | { | 200 | { |
201 | 201 | ||
202 | #ifdef CONFIG_SMP | 202 | #ifdef CONFIG_SMP |
203 | int cpu; | 203 | int cpu; |
204 | int weight = cpumask_weight(data->affinity); | 204 | int weight = cpumask_weight(data->affinity); |
205 | 205 | ||
206 | if (weight > 1) { | 206 | if (weight > 1) { |
207 | cpu = smp_processor_id(); | 207 | cpu = smp_processor_id(); |
208 | for (;;) { | 208 | for (;;) { |
209 | cpu = cpumask_next(cpu, data->affinity); | 209 | cpu = cpumask_next(cpu, data->affinity); |
210 | if (cpu >= nr_cpu_ids) { | 210 | if (cpu >= nr_cpu_ids) { |
211 | cpu = -1; | 211 | cpu = -1; |
212 | continue; | 212 | continue; |
213 | } else if (cpumask_test_cpu(cpu, cpu_online_mask)) { | 213 | } else if (cpumask_test_cpu(cpu, cpu_online_mask)) { |
214 | break; | 214 | break; |
215 | } | 215 | } |
216 | } | 216 | } |
217 | } else if (weight == 1) { | 217 | } else if (weight == 1) { |
218 | cpu = cpumask_first(data->affinity); | 218 | cpu = cpumask_first(data->affinity); |
219 | } else { | 219 | } else { |
220 | cpu = smp_processor_id(); | 220 | cpu = smp_processor_id(); |
221 | } | 221 | } |
222 | return cpu; | 222 | return cpu; |
223 | #else | 223 | #else |
224 | return smp_processor_id(); | 224 | return smp_processor_id(); |
225 | #endif | 225 | #endif |
226 | } | 226 | } |
227 | 227 | ||
228 | static void octeon_irq_ciu_enable(struct irq_data *data) | 228 | static void octeon_irq_ciu_enable(struct irq_data *data) |
229 | { | 229 | { |
230 | int cpu = next_cpu_for_irq(data); | 230 | int cpu = next_cpu_for_irq(data); |
231 | int coreid = octeon_coreid_for_cpu(cpu); | 231 | int coreid = octeon_coreid_for_cpu(cpu); |
232 | unsigned long *pen; | 232 | unsigned long *pen; |
233 | unsigned long flags; | 233 | unsigned long flags; |
234 | union octeon_ciu_chip_data cd; | 234 | union octeon_ciu_chip_data cd; |
235 | raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 235 | raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
236 | 236 | ||
237 | cd.p = irq_data_get_irq_chip_data(data); | 237 | cd.p = irq_data_get_irq_chip_data(data); |
238 | 238 | ||
239 | raw_spin_lock_irqsave(lock, flags); | 239 | raw_spin_lock_irqsave(lock, flags); |
240 | if (cd.s.line == 0) { | 240 | if (cd.s.line == 0) { |
241 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 241 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
242 | __set_bit(cd.s.bit, pen); | 242 | __set_bit(cd.s.bit, pen); |
243 | /* | 243 | /* |
244 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 244 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
245 | * enabling the irq. | 245 | * enabling the irq. |
246 | */ | 246 | */ |
247 | wmb(); | 247 | wmb(); |
248 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 248 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
249 | } else { | 249 | } else { |
250 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 250 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
251 | __set_bit(cd.s.bit, pen); | 251 | __set_bit(cd.s.bit, pen); |
252 | /* | 252 | /* |
253 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 253 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
254 | * enabling the irq. | 254 | * enabling the irq. |
255 | */ | 255 | */ |
256 | wmb(); | 256 | wmb(); |
257 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 257 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
258 | } | 258 | } |
259 | raw_spin_unlock_irqrestore(lock, flags); | 259 | raw_spin_unlock_irqrestore(lock, flags); |
260 | } | 260 | } |
261 | 261 | ||
262 | static void octeon_irq_ciu_enable_local(struct irq_data *data) | 262 | static void octeon_irq_ciu_enable_local(struct irq_data *data) |
263 | { | 263 | { |
264 | unsigned long *pen; | 264 | unsigned long *pen; |
265 | unsigned long flags; | 265 | unsigned long flags; |
266 | union octeon_ciu_chip_data cd; | 266 | union octeon_ciu_chip_data cd; |
267 | raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock); | 267 | raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock); |
268 | 268 | ||
269 | cd.p = irq_data_get_irq_chip_data(data); | 269 | cd.p = irq_data_get_irq_chip_data(data); |
270 | 270 | ||
271 | raw_spin_lock_irqsave(lock, flags); | 271 | raw_spin_lock_irqsave(lock, flags); |
272 | if (cd.s.line == 0) { | 272 | if (cd.s.line == 0) { |
273 | pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); | 273 | pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); |
274 | __set_bit(cd.s.bit, pen); | 274 | __set_bit(cd.s.bit, pen); |
275 | /* | 275 | /* |
276 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 276 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
277 | * enabling the irq. | 277 | * enabling the irq. |
278 | */ | 278 | */ |
279 | wmb(); | 279 | wmb(); |
280 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); | 280 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
281 | } else { | 281 | } else { |
282 | pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); | 282 | pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); |
283 | __set_bit(cd.s.bit, pen); | 283 | __set_bit(cd.s.bit, pen); |
284 | /* | 284 | /* |
285 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 285 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
286 | * enabling the irq. | 286 | * enabling the irq. |
287 | */ | 287 | */ |
288 | wmb(); | 288 | wmb(); |
289 | cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); | 289 | cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); |
290 | } | 290 | } |
291 | raw_spin_unlock_irqrestore(lock, flags); | 291 | raw_spin_unlock_irqrestore(lock, flags); |
292 | } | 292 | } |
293 | 293 | ||
294 | static void octeon_irq_ciu_disable_local(struct irq_data *data) | 294 | static void octeon_irq_ciu_disable_local(struct irq_data *data) |
295 | { | 295 | { |
296 | unsigned long *pen; | 296 | unsigned long *pen; |
297 | unsigned long flags; | 297 | unsigned long flags; |
298 | union octeon_ciu_chip_data cd; | 298 | union octeon_ciu_chip_data cd; |
299 | raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock); | 299 | raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock); |
300 | 300 | ||
301 | cd.p = irq_data_get_irq_chip_data(data); | 301 | cd.p = irq_data_get_irq_chip_data(data); |
302 | 302 | ||
303 | raw_spin_lock_irqsave(lock, flags); | 303 | raw_spin_lock_irqsave(lock, flags); |
304 | if (cd.s.line == 0) { | 304 | if (cd.s.line == 0) { |
305 | pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); | 305 | pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror); |
306 | __clear_bit(cd.s.bit, pen); | 306 | __clear_bit(cd.s.bit, pen); |
307 | /* | 307 | /* |
308 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 308 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
309 | * enabling the irq. | 309 | * enabling the irq. |
310 | */ | 310 | */ |
311 | wmb(); | 311 | wmb(); |
312 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); | 312 | cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); |
313 | } else { | 313 | } else { |
314 | pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); | 314 | pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror); |
315 | __clear_bit(cd.s.bit, pen); | 315 | __clear_bit(cd.s.bit, pen); |
316 | /* | 316 | /* |
317 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 317 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
318 | * enabling the irq. | 318 | * enabling the irq. |
319 | */ | 319 | */ |
320 | wmb(); | 320 | wmb(); |
321 | cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); | 321 | cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen); |
322 | } | 322 | } |
323 | raw_spin_unlock_irqrestore(lock, flags); | 323 | raw_spin_unlock_irqrestore(lock, flags); |
324 | } | 324 | } |
325 | 325 | ||
326 | static void octeon_irq_ciu_disable_all(struct irq_data *data) | 326 | static void octeon_irq_ciu_disable_all(struct irq_data *data) |
327 | { | 327 | { |
328 | unsigned long flags; | 328 | unsigned long flags; |
329 | unsigned long *pen; | 329 | unsigned long *pen; |
330 | int cpu; | 330 | int cpu; |
331 | union octeon_ciu_chip_data cd; | 331 | union octeon_ciu_chip_data cd; |
332 | raw_spinlock_t *lock; | 332 | raw_spinlock_t *lock; |
333 | 333 | ||
334 | cd.p = irq_data_get_irq_chip_data(data); | 334 | cd.p = irq_data_get_irq_chip_data(data); |
335 | 335 | ||
336 | for_each_online_cpu(cpu) { | 336 | for_each_online_cpu(cpu) { |
337 | int coreid = octeon_coreid_for_cpu(cpu); | 337 | int coreid = octeon_coreid_for_cpu(cpu); |
338 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 338 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
339 | if (cd.s.line == 0) | 339 | if (cd.s.line == 0) |
340 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 340 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
341 | else | 341 | else |
342 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 342 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
343 | 343 | ||
344 | raw_spin_lock_irqsave(lock, flags); | 344 | raw_spin_lock_irqsave(lock, flags); |
345 | __clear_bit(cd.s.bit, pen); | 345 | __clear_bit(cd.s.bit, pen); |
346 | /* | 346 | /* |
347 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 347 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
348 | * enabling the irq. | 348 | * enabling the irq. |
349 | */ | 349 | */ |
350 | wmb(); | 350 | wmb(); |
351 | if (cd.s.line == 0) | 351 | if (cd.s.line == 0) |
352 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 352 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
353 | else | 353 | else |
354 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 354 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
355 | raw_spin_unlock_irqrestore(lock, flags); | 355 | raw_spin_unlock_irqrestore(lock, flags); |
356 | } | 356 | } |
357 | } | 357 | } |
358 | 358 | ||
359 | static void octeon_irq_ciu_enable_all(struct irq_data *data) | 359 | static void octeon_irq_ciu_enable_all(struct irq_data *data) |
360 | { | 360 | { |
361 | unsigned long flags; | 361 | unsigned long flags; |
362 | unsigned long *pen; | 362 | unsigned long *pen; |
363 | int cpu; | 363 | int cpu; |
364 | union octeon_ciu_chip_data cd; | 364 | union octeon_ciu_chip_data cd; |
365 | raw_spinlock_t *lock; | 365 | raw_spinlock_t *lock; |
366 | 366 | ||
367 | cd.p = irq_data_get_irq_chip_data(data); | 367 | cd.p = irq_data_get_irq_chip_data(data); |
368 | 368 | ||
369 | for_each_online_cpu(cpu) { | 369 | for_each_online_cpu(cpu) { |
370 | int coreid = octeon_coreid_for_cpu(cpu); | 370 | int coreid = octeon_coreid_for_cpu(cpu); |
371 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 371 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
372 | if (cd.s.line == 0) | 372 | if (cd.s.line == 0) |
373 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 373 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
374 | else | 374 | else |
375 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 375 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
376 | 376 | ||
377 | raw_spin_lock_irqsave(lock, flags); | 377 | raw_spin_lock_irqsave(lock, flags); |
378 | __set_bit(cd.s.bit, pen); | 378 | __set_bit(cd.s.bit, pen); |
379 | /* | 379 | /* |
380 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 380 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
381 | * enabling the irq. | 381 | * enabling the irq. |
382 | */ | 382 | */ |
383 | wmb(); | 383 | wmb(); |
384 | if (cd.s.line == 0) | 384 | if (cd.s.line == 0) |
385 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 385 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
386 | else | 386 | else |
387 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 387 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
388 | raw_spin_unlock_irqrestore(lock, flags); | 388 | raw_spin_unlock_irqrestore(lock, flags); |
389 | } | 389 | } |
390 | } | 390 | } |
391 | 391 | ||
392 | /* | 392 | /* |
393 | * Enable the irq on the next core in the affinity set for chips that | 393 | * Enable the irq on the next core in the affinity set for chips that |
394 | * have the EN*_W1{S,C} registers. | 394 | * have the EN*_W1{S,C} registers. |
395 | */ | 395 | */ |
396 | static void octeon_irq_ciu_enable_v2(struct irq_data *data) | 396 | static void octeon_irq_ciu_enable_v2(struct irq_data *data) |
397 | { | 397 | { |
398 | u64 mask; | 398 | u64 mask; |
399 | int cpu = next_cpu_for_irq(data); | 399 | int cpu = next_cpu_for_irq(data); |
400 | union octeon_ciu_chip_data cd; | 400 | union octeon_ciu_chip_data cd; |
401 | 401 | ||
402 | cd.p = irq_data_get_irq_chip_data(data); | 402 | cd.p = irq_data_get_irq_chip_data(data); |
403 | mask = 1ull << (cd.s.bit); | 403 | mask = 1ull << (cd.s.bit); |
404 | 404 | ||
405 | /* | 405 | /* |
406 | * Called under the desc lock, so these should never get out | 406 | * Called under the desc lock, so these should never get out |
407 | * of sync. | 407 | * of sync. |
408 | */ | 408 | */ |
409 | if (cd.s.line == 0) { | 409 | if (cd.s.line == 0) { |
410 | int index = octeon_coreid_for_cpu(cpu) * 2; | 410 | int index = octeon_coreid_for_cpu(cpu) * 2; |
411 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | 411 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); |
412 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 412 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
413 | } else { | 413 | } else { |
414 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 414 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
415 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 415 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
416 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 416 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
417 | } | 417 | } |
418 | } | 418 | } |
419 | 419 | ||
420 | /* | 420 | /* |
421 | * Enable the irq on the current CPU for chips that | 421 | * Enable the irq on the current CPU for chips that |
422 | * have the EN*_W1{S,C} registers. | 422 | * have the EN*_W1{S,C} registers. |
423 | */ | 423 | */ |
424 | static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) | 424 | static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) |
425 | { | 425 | { |
426 | u64 mask; | 426 | u64 mask; |
427 | union octeon_ciu_chip_data cd; | 427 | union octeon_ciu_chip_data cd; |
428 | 428 | ||
429 | cd.p = irq_data_get_irq_chip_data(data); | 429 | cd.p = irq_data_get_irq_chip_data(data); |
430 | mask = 1ull << (cd.s.bit); | 430 | mask = 1ull << (cd.s.bit); |
431 | 431 | ||
432 | if (cd.s.line == 0) { | 432 | if (cd.s.line == 0) { |
433 | int index = cvmx_get_core_num() * 2; | 433 | int index = cvmx_get_core_num() * 2; |
434 | set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); | 434 | set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); |
435 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 435 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
436 | } else { | 436 | } else { |
437 | int index = cvmx_get_core_num() * 2 + 1; | 437 | int index = cvmx_get_core_num() * 2 + 1; |
438 | set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); | 438 | set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); |
439 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 439 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
440 | } | 440 | } |
441 | } | 441 | } |
442 | 442 | ||
443 | static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) | 443 | static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) |
444 | { | 444 | { |
445 | u64 mask; | 445 | u64 mask; |
446 | union octeon_ciu_chip_data cd; | 446 | union octeon_ciu_chip_data cd; |
447 | 447 | ||
448 | cd.p = irq_data_get_irq_chip_data(data); | 448 | cd.p = irq_data_get_irq_chip_data(data); |
449 | mask = 1ull << (cd.s.bit); | 449 | mask = 1ull << (cd.s.bit); |
450 | 450 | ||
451 | if (cd.s.line == 0) { | 451 | if (cd.s.line == 0) { |
452 | int index = cvmx_get_core_num() * 2; | 452 | int index = cvmx_get_core_num() * 2; |
453 | clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); | 453 | clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror)); |
454 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 454 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
455 | } else { | 455 | } else { |
456 | int index = cvmx_get_core_num() * 2 + 1; | 456 | int index = cvmx_get_core_num() * 2 + 1; |
457 | clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); | 457 | clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror)); |
458 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 458 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
459 | } | 459 | } |
460 | } | 460 | } |
461 | 461 | ||
462 | /* | 462 | /* |
463 | * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq. | 463 | * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq. |
464 | */ | 464 | */ |
465 | static void octeon_irq_ciu_ack(struct irq_data *data) | 465 | static void octeon_irq_ciu_ack(struct irq_data *data) |
466 | { | 466 | { |
467 | u64 mask; | 467 | u64 mask; |
468 | union octeon_ciu_chip_data cd; | 468 | union octeon_ciu_chip_data cd; |
469 | 469 | ||
470 | cd.p = irq_data_get_irq_chip_data(data); | 470 | cd.p = irq_data_get_irq_chip_data(data); |
471 | mask = 1ull << (cd.s.bit); | 471 | mask = 1ull << (cd.s.bit); |
472 | 472 | ||
473 | if (cd.s.line == 0) { | 473 | if (cd.s.line == 0) { |
474 | int index = cvmx_get_core_num() * 2; | 474 | int index = cvmx_get_core_num() * 2; |
475 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); | 475 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); |
476 | } else { | 476 | } else { |
477 | cvmx_write_csr(CVMX_CIU_INT_SUM1, mask); | 477 | cvmx_write_csr(CVMX_CIU_INT_SUM1, mask); |
478 | } | 478 | } |
479 | } | 479 | } |
480 | 480 | ||
481 | /* | 481 | /* |
482 | * Disable the irq on the all cores for chips that have the EN*_W1{S,C} | 482 | * Disable the irq on the all cores for chips that have the EN*_W1{S,C} |
483 | * registers. | 483 | * registers. |
484 | */ | 484 | */ |
485 | static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) | 485 | static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) |
486 | { | 486 | { |
487 | int cpu; | 487 | int cpu; |
488 | u64 mask; | 488 | u64 mask; |
489 | union octeon_ciu_chip_data cd; | 489 | union octeon_ciu_chip_data cd; |
490 | 490 | ||
491 | cd.p = irq_data_get_irq_chip_data(data); | 491 | cd.p = irq_data_get_irq_chip_data(data); |
492 | mask = 1ull << (cd.s.bit); | 492 | mask = 1ull << (cd.s.bit); |
493 | 493 | ||
494 | if (cd.s.line == 0) { | 494 | if (cd.s.line == 0) { |
495 | for_each_online_cpu(cpu) { | 495 | for_each_online_cpu(cpu) { |
496 | int index = octeon_coreid_for_cpu(cpu) * 2; | 496 | int index = octeon_coreid_for_cpu(cpu) * 2; |
497 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | 497 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); |
498 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 498 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
499 | } | 499 | } |
500 | } else { | 500 | } else { |
501 | for_each_online_cpu(cpu) { | 501 | for_each_online_cpu(cpu) { |
502 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 502 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
503 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 503 | clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
504 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 504 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
505 | } | 505 | } |
506 | } | 506 | } |
507 | } | 507 | } |
508 | 508 | ||
509 | /* | 509 | /* |
510 | * Enable the irq on the all cores for chips that have the EN*_W1{S,C} | 510 | * Enable the irq on the all cores for chips that have the EN*_W1{S,C} |
511 | * registers. | 511 | * registers. |
512 | */ | 512 | */ |
513 | static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) | 513 | static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) |
514 | { | 514 | { |
515 | int cpu; | 515 | int cpu; |
516 | u64 mask; | 516 | u64 mask; |
517 | union octeon_ciu_chip_data cd; | 517 | union octeon_ciu_chip_data cd; |
518 | 518 | ||
519 | cd.p = irq_data_get_irq_chip_data(data); | 519 | cd.p = irq_data_get_irq_chip_data(data); |
520 | mask = 1ull << (cd.s.bit); | 520 | mask = 1ull << (cd.s.bit); |
521 | 521 | ||
522 | if (cd.s.line == 0) { | 522 | if (cd.s.line == 0) { |
523 | for_each_online_cpu(cpu) { | 523 | for_each_online_cpu(cpu) { |
524 | int index = octeon_coreid_for_cpu(cpu) * 2; | 524 | int index = octeon_coreid_for_cpu(cpu) * 2; |
525 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); | 525 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); |
526 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 526 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
527 | } | 527 | } |
528 | } else { | 528 | } else { |
529 | for_each_online_cpu(cpu) { | 529 | for_each_online_cpu(cpu) { |
530 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 530 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
531 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 531 | set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
532 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 532 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
533 | } | 533 | } |
534 | } | 534 | } |
535 | } | 535 | } |
536 | 536 | ||
537 | static void octeon_irq_gpio_setup(struct irq_data *data) | 537 | static void octeon_irq_gpio_setup(struct irq_data *data) |
538 | { | 538 | { |
539 | union cvmx_gpio_bit_cfgx cfg; | 539 | union cvmx_gpio_bit_cfgx cfg; |
540 | union octeon_ciu_chip_data cd; | 540 | union octeon_ciu_chip_data cd; |
541 | u32 t = irqd_get_trigger_type(data); | 541 | u32 t = irqd_get_trigger_type(data); |
542 | 542 | ||
543 | cd.p = irq_data_get_irq_chip_data(data); | 543 | cd.p = irq_data_get_irq_chip_data(data); |
544 | 544 | ||
545 | cfg.u64 = 0; | 545 | cfg.u64 = 0; |
546 | cfg.s.int_en = 1; | 546 | cfg.s.int_en = 1; |
547 | cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0; | 547 | cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0; |
548 | cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0; | 548 | cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0; |
549 | 549 | ||
550 | /* 140 nS glitch filter*/ | 550 | /* 140 nS glitch filter*/ |
551 | cfg.s.fil_cnt = 7; | 551 | cfg.s.fil_cnt = 7; |
552 | cfg.s.fil_sel = 3; | 552 | cfg.s.fil_sel = 3; |
553 | 553 | ||
554 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64); | 554 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64); |
555 | } | 555 | } |
556 | 556 | ||
557 | static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) | 557 | static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) |
558 | { | 558 | { |
559 | octeon_irq_gpio_setup(data); | 559 | octeon_irq_gpio_setup(data); |
560 | octeon_irq_ciu_enable_v2(data); | 560 | octeon_irq_ciu_enable_v2(data); |
561 | } | 561 | } |
562 | 562 | ||
563 | static void octeon_irq_ciu_enable_gpio(struct irq_data *data) | 563 | static void octeon_irq_ciu_enable_gpio(struct irq_data *data) |
564 | { | 564 | { |
565 | octeon_irq_gpio_setup(data); | 565 | octeon_irq_gpio_setup(data); |
566 | octeon_irq_ciu_enable(data); | 566 | octeon_irq_ciu_enable(data); |
567 | } | 567 | } |
568 | 568 | ||
569 | static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t) | 569 | static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t) |
570 | { | 570 | { |
571 | irqd_set_trigger_type(data, t); | 571 | irqd_set_trigger_type(data, t); |
572 | octeon_irq_gpio_setup(data); | 572 | octeon_irq_gpio_setup(data); |
573 | 573 | ||
574 | return IRQ_SET_MASK_OK; | 574 | return IRQ_SET_MASK_OK; |
575 | } | 575 | } |
576 | 576 | ||
577 | static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) | 577 | static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) |
578 | { | 578 | { |
579 | union octeon_ciu_chip_data cd; | 579 | union octeon_ciu_chip_data cd; |
580 | 580 | ||
581 | cd.p = irq_data_get_irq_chip_data(data); | 581 | cd.p = irq_data_get_irq_chip_data(data); |
582 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 582 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); |
583 | 583 | ||
584 | octeon_irq_ciu_disable_all_v2(data); | 584 | octeon_irq_ciu_disable_all_v2(data); |
585 | } | 585 | } |
586 | 586 | ||
587 | static void octeon_irq_ciu_disable_gpio(struct irq_data *data) | 587 | static void octeon_irq_ciu_disable_gpio(struct irq_data *data) |
588 | { | 588 | { |
589 | union octeon_ciu_chip_data cd; | 589 | union octeon_ciu_chip_data cd; |
590 | 590 | ||
591 | cd.p = irq_data_get_irq_chip_data(data); | 591 | cd.p = irq_data_get_irq_chip_data(data); |
592 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 592 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); |
593 | 593 | ||
594 | octeon_irq_ciu_disable_all(data); | 594 | octeon_irq_ciu_disable_all(data); |
595 | } | 595 | } |
596 | 596 | ||
597 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) | 597 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) |
598 | { | 598 | { |
599 | union octeon_ciu_chip_data cd; | 599 | union octeon_ciu_chip_data cd; |
600 | u64 mask; | 600 | u64 mask; |
601 | 601 | ||
602 | cd.p = irq_data_get_irq_chip_data(data); | 602 | cd.p = irq_data_get_irq_chip_data(data); |
603 | mask = 1ull << (cd.s.gpio_line); | 603 | mask = 1ull << (cd.s.gpio_line); |
604 | 604 | ||
605 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); | 605 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); |
606 | } | 606 | } |
607 | 607 | ||
608 | static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) | 608 | static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) |
609 | { | 609 | { |
610 | if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) | 610 | if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) |
611 | handle_edge_irq(irq, desc); | 611 | handle_edge_irq(irq, desc); |
612 | else | 612 | else |
613 | handle_level_irq(irq, desc); | 613 | handle_level_irq(irq, desc); |
614 | } | 614 | } |
615 | 615 | ||
616 | #ifdef CONFIG_SMP | 616 | #ifdef CONFIG_SMP |
617 | 617 | ||
618 | static void octeon_irq_cpu_offline_ciu(struct irq_data *data) | 618 | static void octeon_irq_cpu_offline_ciu(struct irq_data *data) |
619 | { | 619 | { |
620 | int cpu = smp_processor_id(); | 620 | int cpu = smp_processor_id(); |
621 | cpumask_t new_affinity; | 621 | cpumask_t new_affinity; |
622 | 622 | ||
623 | if (!cpumask_test_cpu(cpu, data->affinity)) | 623 | if (!cpumask_test_cpu(cpu, data->affinity)) |
624 | return; | 624 | return; |
625 | 625 | ||
626 | if (cpumask_weight(data->affinity) > 1) { | 626 | if (cpumask_weight(data->affinity) > 1) { |
627 | /* | 627 | /* |
628 | * It has multi CPU affinity, just remove this CPU | 628 | * It has multi CPU affinity, just remove this CPU |
629 | * from the affinity set. | 629 | * from the affinity set. |
630 | */ | 630 | */ |
631 | cpumask_copy(&new_affinity, data->affinity); | 631 | cpumask_copy(&new_affinity, data->affinity); |
632 | cpumask_clear_cpu(cpu, &new_affinity); | 632 | cpumask_clear_cpu(cpu, &new_affinity); |
633 | } else { | 633 | } else { |
634 | /* Otherwise, put it on lowest numbered online CPU. */ | 634 | /* Otherwise, put it on lowest numbered online CPU. */ |
635 | cpumask_clear(&new_affinity); | 635 | cpumask_clear(&new_affinity); |
636 | cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); | 636 | cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); |
637 | } | 637 | } |
638 | __irq_set_affinity_locked(data, &new_affinity); | 638 | irq_set_affinity_locked(data, &new_affinity, false); |
639 | } | 639 | } |
640 | 640 | ||
641 | static int octeon_irq_ciu_set_affinity(struct irq_data *data, | 641 | static int octeon_irq_ciu_set_affinity(struct irq_data *data, |
642 | const struct cpumask *dest, bool force) | 642 | const struct cpumask *dest, bool force) |
643 | { | 643 | { |
644 | int cpu; | 644 | int cpu; |
645 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 645 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
646 | unsigned long flags; | 646 | unsigned long flags; |
647 | union octeon_ciu_chip_data cd; | 647 | union octeon_ciu_chip_data cd; |
648 | unsigned long *pen; | 648 | unsigned long *pen; |
649 | raw_spinlock_t *lock; | 649 | raw_spinlock_t *lock; |
650 | 650 | ||
651 | cd.p = irq_data_get_irq_chip_data(data); | 651 | cd.p = irq_data_get_irq_chip_data(data); |
652 | 652 | ||
653 | /* | 653 | /* |
654 | * For non-v2 CIU, we will allow only single CPU affinity. | 654 | * For non-v2 CIU, we will allow only single CPU affinity. |
655 | * This removes the need to do locking in the .ack/.eoi | 655 | * This removes the need to do locking in the .ack/.eoi |
656 | * functions. | 656 | * functions. |
657 | */ | 657 | */ |
658 | if (cpumask_weight(dest) != 1) | 658 | if (cpumask_weight(dest) != 1) |
659 | return -EINVAL; | 659 | return -EINVAL; |
660 | 660 | ||
661 | if (!enable_one) | 661 | if (!enable_one) |
662 | return 0; | 662 | return 0; |
663 | 663 | ||
664 | 664 | ||
665 | for_each_online_cpu(cpu) { | 665 | for_each_online_cpu(cpu) { |
666 | int coreid = octeon_coreid_for_cpu(cpu); | 666 | int coreid = octeon_coreid_for_cpu(cpu); |
667 | 667 | ||
668 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 668 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
669 | raw_spin_lock_irqsave(lock, flags); | 669 | raw_spin_lock_irqsave(lock, flags); |
670 | 670 | ||
671 | if (cd.s.line == 0) | 671 | if (cd.s.line == 0) |
672 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 672 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
673 | else | 673 | else |
674 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 674 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
675 | 675 | ||
676 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 676 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
677 | enable_one = 0; | 677 | enable_one = 0; |
678 | __set_bit(cd.s.bit, pen); | 678 | __set_bit(cd.s.bit, pen); |
679 | } else { | 679 | } else { |
680 | __clear_bit(cd.s.bit, pen); | 680 | __clear_bit(cd.s.bit, pen); |
681 | } | 681 | } |
682 | /* | 682 | /* |
683 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 683 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
684 | * enabling the irq. | 684 | * enabling the irq. |
685 | */ | 685 | */ |
686 | wmb(); | 686 | wmb(); |
687 | 687 | ||
688 | if (cd.s.line == 0) | 688 | if (cd.s.line == 0) |
689 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 689 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
690 | else | 690 | else |
691 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 691 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
692 | 692 | ||
693 | raw_spin_unlock_irqrestore(lock, flags); | 693 | raw_spin_unlock_irqrestore(lock, flags); |
694 | } | 694 | } |
695 | return 0; | 695 | return 0; |
696 | } | 696 | } |
697 | 697 | ||
698 | /* | 698 | /* |
699 | * Set affinity for the irq for chips that have the EN*_W1{S,C} | 699 | * Set affinity for the irq for chips that have the EN*_W1{S,C} |
700 | * registers. | 700 | * registers. |
701 | */ | 701 | */ |
702 | static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, | 702 | static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, |
703 | const struct cpumask *dest, | 703 | const struct cpumask *dest, |
704 | bool force) | 704 | bool force) |
705 | { | 705 | { |
706 | int cpu; | 706 | int cpu; |
707 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 707 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
708 | u64 mask; | 708 | u64 mask; |
709 | union octeon_ciu_chip_data cd; | 709 | union octeon_ciu_chip_data cd; |
710 | 710 | ||
711 | if (!enable_one) | 711 | if (!enable_one) |
712 | return 0; | 712 | return 0; |
713 | 713 | ||
714 | cd.p = irq_data_get_irq_chip_data(data); | 714 | cd.p = irq_data_get_irq_chip_data(data); |
715 | mask = 1ull << cd.s.bit; | 715 | mask = 1ull << cd.s.bit; |
716 | 716 | ||
717 | if (cd.s.line == 0) { | 717 | if (cd.s.line == 0) { |
718 | for_each_online_cpu(cpu) { | 718 | for_each_online_cpu(cpu) { |
719 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 719 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
720 | int index = octeon_coreid_for_cpu(cpu) * 2; | 720 | int index = octeon_coreid_for_cpu(cpu) * 2; |
721 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 721 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
722 | enable_one = false; | 722 | enable_one = false; |
723 | set_bit(cd.s.bit, pen); | 723 | set_bit(cd.s.bit, pen); |
724 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 724 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
725 | } else { | 725 | } else { |
726 | clear_bit(cd.s.bit, pen); | 726 | clear_bit(cd.s.bit, pen); |
727 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 727 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
728 | } | 728 | } |
729 | } | 729 | } |
730 | } else { | 730 | } else { |
731 | for_each_online_cpu(cpu) { | 731 | for_each_online_cpu(cpu) { |
732 | unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 732 | unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
733 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 733 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
734 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 734 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
735 | enable_one = false; | 735 | enable_one = false; |
736 | set_bit(cd.s.bit, pen); | 736 | set_bit(cd.s.bit, pen); |
737 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 737 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
738 | } else { | 738 | } else { |
739 | clear_bit(cd.s.bit, pen); | 739 | clear_bit(cd.s.bit, pen); |
740 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 740 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
741 | } | 741 | } |
742 | } | 742 | } |
743 | } | 743 | } |
744 | return 0; | 744 | return 0; |
745 | } | 745 | } |
746 | #endif | 746 | #endif |
747 | 747 | ||
748 | /* | 748 | /* |
749 | * Newer octeon chips have support for lockless CIU operation. | 749 | * Newer octeon chips have support for lockless CIU operation. |
750 | */ | 750 | */ |
751 | static struct irq_chip octeon_irq_chip_ciu_v2 = { | 751 | static struct irq_chip octeon_irq_chip_ciu_v2 = { |
752 | .name = "CIU", | 752 | .name = "CIU", |
753 | .irq_enable = octeon_irq_ciu_enable_v2, | 753 | .irq_enable = octeon_irq_ciu_enable_v2, |
754 | .irq_disable = octeon_irq_ciu_disable_all_v2, | 754 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
755 | .irq_ack = octeon_irq_ciu_ack, | 755 | .irq_ack = octeon_irq_ciu_ack, |
756 | .irq_mask = octeon_irq_ciu_disable_local_v2, | 756 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
757 | .irq_unmask = octeon_irq_ciu_enable_v2, | 757 | .irq_unmask = octeon_irq_ciu_enable_v2, |
758 | #ifdef CONFIG_SMP | 758 | #ifdef CONFIG_SMP |
759 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, | 759 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, |
760 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | 760 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
761 | #endif | 761 | #endif |
762 | }; | 762 | }; |
763 | 763 | ||
764 | static struct irq_chip octeon_irq_chip_ciu = { | 764 | static struct irq_chip octeon_irq_chip_ciu = { |
765 | .name = "CIU", | 765 | .name = "CIU", |
766 | .irq_enable = octeon_irq_ciu_enable, | 766 | .irq_enable = octeon_irq_ciu_enable, |
767 | .irq_disable = octeon_irq_ciu_disable_all, | 767 | .irq_disable = octeon_irq_ciu_disable_all, |
768 | .irq_ack = octeon_irq_ciu_ack, | 768 | .irq_ack = octeon_irq_ciu_ack, |
769 | .irq_mask = octeon_irq_ciu_disable_local, | 769 | .irq_mask = octeon_irq_ciu_disable_local, |
770 | .irq_unmask = octeon_irq_ciu_enable, | 770 | .irq_unmask = octeon_irq_ciu_enable, |
771 | #ifdef CONFIG_SMP | 771 | #ifdef CONFIG_SMP |
772 | .irq_set_affinity = octeon_irq_ciu_set_affinity, | 772 | .irq_set_affinity = octeon_irq_ciu_set_affinity, |
773 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | 773 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
774 | #endif | 774 | #endif |
775 | }; | 775 | }; |
776 | 776 | ||
777 | /* The mbox versions don't do any affinity or round-robin. */ | 777 | /* The mbox versions don't do any affinity or round-robin. */ |
778 | static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = { | 778 | static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = { |
779 | .name = "CIU-M", | 779 | .name = "CIU-M", |
780 | .irq_enable = octeon_irq_ciu_enable_all_v2, | 780 | .irq_enable = octeon_irq_ciu_enable_all_v2, |
781 | .irq_disable = octeon_irq_ciu_disable_all_v2, | 781 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
782 | .irq_ack = octeon_irq_ciu_disable_local_v2, | 782 | .irq_ack = octeon_irq_ciu_disable_local_v2, |
783 | .irq_eoi = octeon_irq_ciu_enable_local_v2, | 783 | .irq_eoi = octeon_irq_ciu_enable_local_v2, |
784 | 784 | ||
785 | .irq_cpu_online = octeon_irq_ciu_enable_local_v2, | 785 | .irq_cpu_online = octeon_irq_ciu_enable_local_v2, |
786 | .irq_cpu_offline = octeon_irq_ciu_disable_local_v2, | 786 | .irq_cpu_offline = octeon_irq_ciu_disable_local_v2, |
787 | .flags = IRQCHIP_ONOFFLINE_ENABLED, | 787 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
788 | }; | 788 | }; |
789 | 789 | ||
790 | static struct irq_chip octeon_irq_chip_ciu_mbox = { | 790 | static struct irq_chip octeon_irq_chip_ciu_mbox = { |
791 | .name = "CIU-M", | 791 | .name = "CIU-M", |
792 | .irq_enable = octeon_irq_ciu_enable_all, | 792 | .irq_enable = octeon_irq_ciu_enable_all, |
793 | .irq_disable = octeon_irq_ciu_disable_all, | 793 | .irq_disable = octeon_irq_ciu_disable_all, |
794 | .irq_ack = octeon_irq_ciu_disable_local, | 794 | .irq_ack = octeon_irq_ciu_disable_local, |
795 | .irq_eoi = octeon_irq_ciu_enable_local, | 795 | .irq_eoi = octeon_irq_ciu_enable_local, |
796 | 796 | ||
797 | .irq_cpu_online = octeon_irq_ciu_enable_local, | 797 | .irq_cpu_online = octeon_irq_ciu_enable_local, |
798 | .irq_cpu_offline = octeon_irq_ciu_disable_local, | 798 | .irq_cpu_offline = octeon_irq_ciu_disable_local, |
799 | .flags = IRQCHIP_ONOFFLINE_ENABLED, | 799 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
800 | }; | 800 | }; |
801 | 801 | ||
802 | static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = { | 802 | static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = { |
803 | .name = "CIU-GPIO", | 803 | .name = "CIU-GPIO", |
804 | .irq_enable = octeon_irq_ciu_enable_gpio_v2, | 804 | .irq_enable = octeon_irq_ciu_enable_gpio_v2, |
805 | .irq_disable = octeon_irq_ciu_disable_gpio_v2, | 805 | .irq_disable = octeon_irq_ciu_disable_gpio_v2, |
806 | .irq_ack = octeon_irq_ciu_gpio_ack, | 806 | .irq_ack = octeon_irq_ciu_gpio_ack, |
807 | .irq_mask = octeon_irq_ciu_disable_local_v2, | 807 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
808 | .irq_unmask = octeon_irq_ciu_enable_v2, | 808 | .irq_unmask = octeon_irq_ciu_enable_v2, |
809 | .irq_set_type = octeon_irq_ciu_gpio_set_type, | 809 | .irq_set_type = octeon_irq_ciu_gpio_set_type, |
810 | #ifdef CONFIG_SMP | 810 | #ifdef CONFIG_SMP |
811 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, | 811 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, |
812 | #endif | 812 | #endif |
813 | .flags = IRQCHIP_SET_TYPE_MASKED, | 813 | .flags = IRQCHIP_SET_TYPE_MASKED, |
814 | }; | 814 | }; |
815 | 815 | ||
816 | static struct irq_chip octeon_irq_chip_ciu_gpio = { | 816 | static struct irq_chip octeon_irq_chip_ciu_gpio = { |
817 | .name = "CIU-GPIO", | 817 | .name = "CIU-GPIO", |
818 | .irq_enable = octeon_irq_ciu_enable_gpio, | 818 | .irq_enable = octeon_irq_ciu_enable_gpio, |
819 | .irq_disable = octeon_irq_ciu_disable_gpio, | 819 | .irq_disable = octeon_irq_ciu_disable_gpio, |
820 | .irq_mask = octeon_irq_ciu_disable_local, | 820 | .irq_mask = octeon_irq_ciu_disable_local, |
821 | .irq_unmask = octeon_irq_ciu_enable, | 821 | .irq_unmask = octeon_irq_ciu_enable, |
822 | .irq_ack = octeon_irq_ciu_gpio_ack, | 822 | .irq_ack = octeon_irq_ciu_gpio_ack, |
823 | .irq_set_type = octeon_irq_ciu_gpio_set_type, | 823 | .irq_set_type = octeon_irq_ciu_gpio_set_type, |
824 | #ifdef CONFIG_SMP | 824 | #ifdef CONFIG_SMP |
825 | .irq_set_affinity = octeon_irq_ciu_set_affinity, | 825 | .irq_set_affinity = octeon_irq_ciu_set_affinity, |
826 | #endif | 826 | #endif |
827 | .flags = IRQCHIP_SET_TYPE_MASKED, | 827 | .flags = IRQCHIP_SET_TYPE_MASKED, |
828 | }; | 828 | }; |
829 | 829 | ||
830 | /* | 830 | /* |
831 | * Watchdog interrupts are special. They are associated with a single | 831 | * Watchdog interrupts are special. They are associated with a single |
832 | * core, so we hardwire the affinity to that core. | 832 | * core, so we hardwire the affinity to that core. |
833 | */ | 833 | */ |
834 | static void octeon_irq_ciu_wd_enable(struct irq_data *data) | 834 | static void octeon_irq_ciu_wd_enable(struct irq_data *data) |
835 | { | 835 | { |
836 | unsigned long flags; | 836 | unsigned long flags; |
837 | unsigned long *pen; | 837 | unsigned long *pen; |
838 | int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | 838 | int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ |
839 | int cpu = octeon_cpu_for_coreid(coreid); | 839 | int cpu = octeon_cpu_for_coreid(coreid); |
840 | raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 840 | raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
841 | 841 | ||
842 | raw_spin_lock_irqsave(lock, flags); | 842 | raw_spin_lock_irqsave(lock, flags); |
843 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 843 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
844 | __set_bit(coreid, pen); | 844 | __set_bit(coreid, pen); |
845 | /* | 845 | /* |
846 | * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling | 846 | * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling |
847 | * the irq. | 847 | * the irq. |
848 | */ | 848 | */ |
849 | wmb(); | 849 | wmb(); |
850 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 850 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
851 | raw_spin_unlock_irqrestore(lock, flags); | 851 | raw_spin_unlock_irqrestore(lock, flags); |
852 | } | 852 | } |
853 | 853 | ||
854 | /* | 854 | /* |
855 | * Watchdog interrupts are special. They are associated with a single | 855 | * Watchdog interrupts are special. They are associated with a single |
856 | * core, so we hardwire the affinity to that core. | 856 | * core, so we hardwire the affinity to that core. |
857 | */ | 857 | */ |
858 | static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data) | 858 | static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data) |
859 | { | 859 | { |
860 | int coreid = data->irq - OCTEON_IRQ_WDOG0; | 860 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
861 | int cpu = octeon_cpu_for_coreid(coreid); | 861 | int cpu = octeon_cpu_for_coreid(coreid); |
862 | 862 | ||
863 | set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); | 863 | set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); |
864 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid); | 864 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid); |
865 | } | 865 | } |
866 | 866 | ||
867 | 867 | ||
868 | static struct irq_chip octeon_irq_chip_ciu_wd_v2 = { | 868 | static struct irq_chip octeon_irq_chip_ciu_wd_v2 = { |
869 | .name = "CIU-W", | 869 | .name = "CIU-W", |
870 | .irq_enable = octeon_irq_ciu1_wd_enable_v2, | 870 | .irq_enable = octeon_irq_ciu1_wd_enable_v2, |
871 | .irq_disable = octeon_irq_ciu_disable_all_v2, | 871 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
872 | .irq_mask = octeon_irq_ciu_disable_local_v2, | 872 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
873 | .irq_unmask = octeon_irq_ciu_enable_local_v2, | 873 | .irq_unmask = octeon_irq_ciu_enable_local_v2, |
874 | }; | 874 | }; |
875 | 875 | ||
876 | static struct irq_chip octeon_irq_chip_ciu_wd = { | 876 | static struct irq_chip octeon_irq_chip_ciu_wd = { |
877 | .name = "CIU-W", | 877 | .name = "CIU-W", |
878 | .irq_enable = octeon_irq_ciu_wd_enable, | 878 | .irq_enable = octeon_irq_ciu_wd_enable, |
879 | .irq_disable = octeon_irq_ciu_disable_all, | 879 | .irq_disable = octeon_irq_ciu_disable_all, |
880 | .irq_mask = octeon_irq_ciu_disable_local, | 880 | .irq_mask = octeon_irq_ciu_disable_local, |
881 | .irq_unmask = octeon_irq_ciu_enable_local, | 881 | .irq_unmask = octeon_irq_ciu_enable_local, |
882 | }; | 882 | }; |
883 | 883 | ||
884 | static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit) | 884 | static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit) |
885 | { | 885 | { |
886 | bool edge = false; | 886 | bool edge = false; |
887 | 887 | ||
888 | if (line == 0) | 888 | if (line == 0) |
889 | switch (bit) { | 889 | switch (bit) { |
890 | case 48 ... 49: /* GMX DRP */ | 890 | case 48 ... 49: /* GMX DRP */ |
891 | case 50: /* IPD_DRP */ | 891 | case 50: /* IPD_DRP */ |
892 | case 52 ... 55: /* Timers */ | 892 | case 52 ... 55: /* Timers */ |
893 | case 58: /* MPI */ | 893 | case 58: /* MPI */ |
894 | edge = true; | 894 | edge = true; |
895 | break; | 895 | break; |
896 | default: | 896 | default: |
897 | break; | 897 | break; |
898 | } | 898 | } |
899 | else /* line == 1 */ | 899 | else /* line == 1 */ |
900 | switch (bit) { | 900 | switch (bit) { |
901 | case 47: /* PTP */ | 901 | case 47: /* PTP */ |
902 | edge = true; | 902 | edge = true; |
903 | break; | 903 | break; |
904 | default: | 904 | default: |
905 | break; | 905 | break; |
906 | } | 906 | } |
907 | return edge; | 907 | return edge; |
908 | } | 908 | } |
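/*
 * The classification above drives the flow-handler choice in
 * octeon_irq_ciu_map() below: sources marked edge get handle_edge_irq(),
 * which acks the pending bit so a new edge can be latched, while all
 * other sources are handled as level triggered via handle_level_irq().
 */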
909 | 909 | ||
910 | struct octeon_irq_gpio_domain_data { | 910 | struct octeon_irq_gpio_domain_data { |
911 | unsigned int base_hwirq; | 911 | unsigned int base_hwirq; |
912 | }; | 912 | }; |
913 | 913 | ||
914 | static int octeon_irq_gpio_xlat(struct irq_domain *d, | 914 | static int octeon_irq_gpio_xlat(struct irq_domain *d, |
915 | struct device_node *node, | 915 | struct device_node *node, |
916 | const u32 *intspec, | 916 | const u32 *intspec, |
917 | unsigned int intsize, | 917 | unsigned int intsize, |
918 | unsigned long *out_hwirq, | 918 | unsigned long *out_hwirq, |
919 | unsigned int *out_type) | 919 | unsigned int *out_type) |
920 | { | 920 | { |
921 | unsigned int type; | 921 | unsigned int type; |
922 | unsigned int pin; | 922 | unsigned int pin; |
923 | unsigned int trigger; | 923 | unsigned int trigger; |
924 | 924 | ||
925 | if (d->of_node != node) | 925 | if (d->of_node != node) |
926 | return -EINVAL; | 926 | return -EINVAL; |
927 | 927 | ||
928 | if (intsize < 2) | 928 | if (intsize < 2) |
929 | return -EINVAL; | 929 | return -EINVAL; |
930 | 930 | ||
931 | pin = intspec[0]; | 931 | pin = intspec[0]; |
932 | if (pin >= 16) | 932 | if (pin >= 16) |
933 | return -EINVAL; | 933 | return -EINVAL; |
934 | 934 | ||
935 | trigger = intspec[1]; | 935 | trigger = intspec[1]; |
936 | 936 | ||
937 | switch (trigger) { | 937 | switch (trigger) { |
938 | case 1: | 938 | case 1: |
939 | type = IRQ_TYPE_EDGE_RISING; | 939 | type = IRQ_TYPE_EDGE_RISING; |
940 | break; | 940 | break; |
941 | case 2: | 941 | case 2: |
942 | type = IRQ_TYPE_EDGE_FALLING; | 942 | type = IRQ_TYPE_EDGE_FALLING; |
943 | break; | 943 | break; |
944 | case 4: | 944 | case 4: |
945 | type = IRQ_TYPE_LEVEL_HIGH; | 945 | type = IRQ_TYPE_LEVEL_HIGH; |
946 | break; | 946 | break; |
947 | case 8: | 947 | case 8: |
948 | type = IRQ_TYPE_LEVEL_LOW; | 948 | type = IRQ_TYPE_LEVEL_LOW; |
949 | break; | 949 | break; |
950 | default: | 950 | default: |
951 | pr_err("Error: (%s) Invalid irq trigger specification: %x\n", | 951 | pr_err("Error: (%s) Invalid irq trigger specification: %x\n", |
952 | node->name, | 952 | node->name, |
953 | trigger); | 953 | trigger); |
954 | type = IRQ_TYPE_LEVEL_LOW; | 954 | type = IRQ_TYPE_LEVEL_LOW; |
955 | break; | 955 | break; |
956 | } | 956 | } |
957 | *out_type = type; | 957 | *out_type = type; |
958 | *out_hwirq = pin; | 958 | *out_hwirq = pin; |
959 | 959 | ||
960 | return 0; | 960 | return 0; |
961 | } | 961 | } |
962 | 962 | ||
963 | static int octeon_irq_ciu_xlat(struct irq_domain *d, | 963 | static int octeon_irq_ciu_xlat(struct irq_domain *d, |
964 | struct device_node *node, | 964 | struct device_node *node, |
965 | const u32 *intspec, | 965 | const u32 *intspec, |
966 | unsigned int intsize, | 966 | unsigned int intsize, |
967 | unsigned long *out_hwirq, | 967 | unsigned long *out_hwirq, |
968 | unsigned int *out_type) | 968 | unsigned int *out_type) |
969 | { | 969 | { |
970 | unsigned int ciu, bit; | 970 | unsigned int ciu, bit; |
971 | 971 | ||
972 | ciu = intspec[0]; | 972 | ciu = intspec[0]; |
973 | bit = intspec[1]; | 973 | bit = intspec[1]; |
974 | 974 | ||
975 | if (ciu > 1 || bit > 63) | 975 | if (ciu > 1 || bit > 63) |
976 | return -EINVAL; | 976 | return -EINVAL; |
977 | 977 | ||
978 | *out_hwirq = (ciu << 6) | bit; | 978 | *out_hwirq = (ciu << 6) | bit; |
979 | *out_type = 0; | 979 | *out_type = 0; |
980 | 980 | ||
981 | return 0; | 981 | return 0; |
982 | } | 982 | } |
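/*
 * The two-cell DT specifier is packed into a single hwirq as
 * (ciu << 6) | bit, and octeon_irq_ciu_map() undoes this with hw >> 6
 * and hw & 63.  For example, a specifier of <1 17> (CIU line 1, bit 17,
 * i.e. USB1) yields hwirq (1 << 6) | 17 == 81.
 */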
983 | 983 | ||
984 | static struct irq_chip *octeon_irq_ciu_chip; | 984 | static struct irq_chip *octeon_irq_ciu_chip; |
985 | static struct irq_chip *octeon_irq_gpio_chip; | 985 | static struct irq_chip *octeon_irq_gpio_chip; |
986 | 986 | ||
987 | static bool octeon_irq_virq_in_range(unsigned int virq) | 987 | static bool octeon_irq_virq_in_range(unsigned int virq) |
988 | { | 988 | { |
989 | /* We cannot let it overflow the mapping array. */ | 989 | /* We cannot let it overflow the mapping array. */ |
990 | if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0]))) | 990 | if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0]))) |
991 | return true; | 991 | return true; |
992 | 992 | ||
993 | WARN_ONCE(true, "virq out of range %u.\n", virq); | 993 | WARN_ONCE(true, "virq out of range %u.\n", virq); |
994 | return false; | 994 | return false; |
995 | } | 995 | } |
996 | 996 | ||
997 | static int octeon_irq_ciu_map(struct irq_domain *d, | 997 | static int octeon_irq_ciu_map(struct irq_domain *d, |
998 | unsigned int virq, irq_hw_number_t hw) | 998 | unsigned int virq, irq_hw_number_t hw) |
999 | { | 999 | { |
1000 | unsigned int line = hw >> 6; | 1000 | unsigned int line = hw >> 6; |
1001 | unsigned int bit = hw & 63; | 1001 | unsigned int bit = hw & 63; |
1002 | 1002 | ||
1003 | if (!octeon_irq_virq_in_range(virq)) | 1003 | if (!octeon_irq_virq_in_range(virq)) |
1004 | return -EINVAL; | 1004 | return -EINVAL; |
1005 | 1005 | ||
1006 | /* Don't map irq if it is reserved for GPIO. */ | 1006 | /* Don't map irq if it is reserved for GPIO. */ |
1007 | if (line == 0 && bit >= 16 && bit < 32) | 1007 | if (line == 0 && bit >= 16 && bit < 32) |

1008 | return 0; | 1008 | return 0; |
1009 | 1009 | ||
1010 | if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) | 1010 | if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) |
1011 | return -EINVAL; | 1011 | return -EINVAL; |
1012 | 1012 | ||
1013 | if (octeon_irq_ciu_is_edge(line, bit)) | 1013 | if (octeon_irq_ciu_is_edge(line, bit)) |
1014 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1014 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1015 | octeon_irq_ciu_chip, | 1015 | octeon_irq_ciu_chip, |
1016 | handle_edge_irq); | 1016 | handle_edge_irq); |
1017 | else | 1017 | else |
1018 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1018 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1019 | octeon_irq_ciu_chip, | 1019 | octeon_irq_ciu_chip, |
1020 | handle_level_irq); | 1020 | handle_level_irq); |
1021 | 1021 | ||
1022 | return 0; | 1022 | return 0; |
1023 | } | 1023 | } |
1024 | 1024 | ||
1025 | static int octeon_irq_gpio_map_common(struct irq_domain *d, | 1025 | static int octeon_irq_gpio_map_common(struct irq_domain *d, |
1026 | unsigned int virq, irq_hw_number_t hw, | 1026 | unsigned int virq, irq_hw_number_t hw, |
1027 | int line_limit, struct irq_chip *chip) | 1027 | int line_limit, struct irq_chip *chip) |
1028 | { | 1028 | { |
1029 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; | 1029 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; |
1030 | unsigned int line, bit; | 1030 | unsigned int line, bit; |
1031 | 1031 | ||
1032 | if (!octeon_irq_virq_in_range(virq)) | 1032 | if (!octeon_irq_virq_in_range(virq)) |
1033 | return -EINVAL; | 1033 | return -EINVAL; |
1034 | 1034 | ||
1035 | line = (hw + gpiod->base_hwirq) >> 6; | 1035 | line = (hw + gpiod->base_hwirq) >> 6; |
1036 | bit = (hw + gpiod->base_hwirq) & 63; | 1036 | bit = (hw + gpiod->base_hwirq) & 63; |
1037 | if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) | 1037 | if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) |
1038 | return -EINVAL; | 1038 | return -EINVAL; |
1039 | 1039 | ||
1040 | octeon_irq_set_ciu_mapping(virq, line, bit, hw, | 1040 | octeon_irq_set_ciu_mapping(virq, line, bit, hw, |
1041 | chip, octeon_irq_handle_gpio); | 1041 | chip, octeon_irq_handle_gpio); |
1042 | return 0; | 1042 | return 0; |
1043 | } | 1043 | } |
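/*
 * gpiod->base_hwirq offsets the GPIO pins into the CIU bit space.  With
 * the CIU setup below (base_hwirq = 16), GPIO pin 3 lands on line 0,
 * bit 19; with the CIU2 setup (base_hwirq = 7 << 6), the same pin lands
 * on line 7, bit 3.
 */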
1044 | 1044 | ||
1045 | static int octeon_irq_gpio_map(struct irq_domain *d, | 1045 | static int octeon_irq_gpio_map(struct irq_domain *d, |
1046 | unsigned int virq, irq_hw_number_t hw) | 1046 | unsigned int virq, irq_hw_number_t hw) |
1047 | { | 1047 | { |
1048 | return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip); | 1048 | return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip); |
1049 | } | 1049 | } |
1050 | 1050 | ||
1051 | static struct irq_domain_ops octeon_irq_domain_ciu_ops = { | 1051 | static struct irq_domain_ops octeon_irq_domain_ciu_ops = { |
1052 | .map = octeon_irq_ciu_map, | 1052 | .map = octeon_irq_ciu_map, |
1053 | .xlate = octeon_irq_ciu_xlat, | 1053 | .xlate = octeon_irq_ciu_xlat, |
1054 | }; | 1054 | }; |
1055 | 1055 | ||
1056 | static struct irq_domain_ops octeon_irq_domain_gpio_ops = { | 1056 | static struct irq_domain_ops octeon_irq_domain_gpio_ops = { |
1057 | .map = octeon_irq_gpio_map, | 1057 | .map = octeon_irq_gpio_map, |
1058 | .xlate = octeon_irq_gpio_xlat, | 1058 | .xlate = octeon_irq_gpio_xlat, |
1059 | }; | 1059 | }; |
1060 | 1060 | ||
1061 | static void octeon_irq_ip2_ciu(void) | 1061 | static void octeon_irq_ip2_ciu(void) |
1062 | { | 1062 | { |
1063 | const unsigned long core_id = cvmx_get_core_num(); | 1063 | const unsigned long core_id = cvmx_get_core_num(); |
1064 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); | 1064 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2)); |
1065 | 1065 | ||
1066 | ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); | 1066 | ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror); |
1067 | if (likely(ciu_sum)) { | 1067 | if (likely(ciu_sum)) { |
1068 | int bit = fls64(ciu_sum) - 1; | 1068 | int bit = fls64(ciu_sum) - 1; |
1069 | int irq = octeon_irq_ciu_to_irq[0][bit]; | 1069 | int irq = octeon_irq_ciu_to_irq[0][bit]; |
1070 | if (likely(irq)) | 1070 | if (likely(irq)) |
1071 | do_IRQ(irq); | 1071 | do_IRQ(irq); |
1072 | else | 1072 | else |
1073 | spurious_interrupt(); | 1073 | spurious_interrupt(); |
1074 | } else { | 1074 | } else { |
1075 | spurious_interrupt(); | 1075 | spurious_interrupt(); |
1076 | } | 1076 | } |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | static void octeon_irq_ip3_ciu(void) | 1079 | static void octeon_irq_ip3_ciu(void) |
1080 | { | 1080 | { |
1081 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); | 1081 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1); |
1082 | 1082 | ||
1083 | ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); | 1083 | ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror); |
1084 | if (likely(ciu_sum)) { | 1084 | if (likely(ciu_sum)) { |
1085 | int bit = fls64(ciu_sum) - 1; | 1085 | int bit = fls64(ciu_sum) - 1; |
1086 | int irq = octeon_irq_ciu_to_irq[1][bit]; | 1086 | int irq = octeon_irq_ciu_to_irq[1][bit]; |
1087 | if (likely(irq)) | 1087 | if (likely(irq)) |
1088 | do_IRQ(irq); | 1088 | do_IRQ(irq); |
1089 | else | 1089 | else |
1090 | spurious_interrupt(); | 1090 | spurious_interrupt(); |
1091 | } else { | 1091 | } else { |
1092 | spurious_interrupt(); | 1092 | spurious_interrupt(); |
1093 | } | 1093 | } |
1094 | } | 1094 | } |
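/*
 * IP2 and IP3 dispatch follow the same pattern: read the hardware SUM
 * register, mask it with this CPU's software enable mirror, pick the
 * highest pending bit with fls64(), and translate that to a Linux irq
 * via octeon_irq_ciu_to_irq[][].  An empty or unmapped result counts as
 * a spurious interrupt.
 */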
1095 | 1095 | ||
1096 | static bool octeon_irq_use_ip4; | 1096 | static bool octeon_irq_use_ip4; |
1097 | 1097 | ||
1098 | static void octeon_irq_local_enable_ip4(void *arg) | 1098 | static void octeon_irq_local_enable_ip4(void *arg) |
1099 | { | 1099 | { |
1100 | set_c0_status(STATUSF_IP4); | 1100 | set_c0_status(STATUSF_IP4); |
1101 | } | 1101 | } |
1102 | 1102 | ||
1103 | static void octeon_irq_ip4_mask(void) | 1103 | static void octeon_irq_ip4_mask(void) |
1104 | { | 1104 | { |
1105 | clear_c0_status(STATUSF_IP4); | 1105 | clear_c0_status(STATUSF_IP4); |
1106 | spurious_interrupt(); | 1106 | spurious_interrupt(); |
1107 | } | 1107 | } |
1108 | 1108 | ||
1109 | static void (*octeon_irq_ip2)(void); | 1109 | static void (*octeon_irq_ip2)(void); |
1110 | static void (*octeon_irq_ip3)(void); | 1110 | static void (*octeon_irq_ip3)(void); |
1111 | static void (*octeon_irq_ip4)(void); | 1111 | static void (*octeon_irq_ip4)(void); |
1112 | 1112 | ||
1113 | void (*octeon_irq_setup_secondary)(void); | 1113 | void (*octeon_irq_setup_secondary)(void); |
1114 | 1114 | ||
1115 | void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h) | 1115 | void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h) |
1116 | { | 1116 | { |
1117 | octeon_irq_ip4 = h; | 1117 | octeon_irq_ip4 = h; |
1118 | octeon_irq_use_ip4 = true; | 1118 | octeon_irq_use_ip4 = true; |
1119 | on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1); | 1119 | on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1); |
1120 | } | 1120 | } |
1121 | 1121 | ||
1122 | static void octeon_irq_percpu_enable(void) | 1122 | static void octeon_irq_percpu_enable(void) |
1123 | { | 1123 | { |
1124 | irq_cpu_online(); | 1124 | irq_cpu_online(); |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | static void octeon_irq_init_ciu_percpu(void) | 1127 | static void octeon_irq_init_ciu_percpu(void) |
1128 | { | 1128 | { |
1129 | int coreid = cvmx_get_core_num(); | 1129 | int coreid = cvmx_get_core_num(); |
1130 | 1130 | ||
1131 | 1131 | ||
1132 | __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0; | 1132 | __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0; |
1133 | __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0; | 1133 | __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0; |
1134 | wmb(); | 1134 | wmb(); |
1135 | raw_spin_lock_init(&__get_cpu_var(octeon_irq_ciu_spinlock)); | 1135 | raw_spin_lock_init(&__get_cpu_var(octeon_irq_ciu_spinlock)); |
1136 | /* | 1136 | /* |
1137 | * Disable All CIU Interrupts. The ones we need will be | 1137 | * Disable All CIU Interrupts. The ones we need will be |
1138 | * enabled later. Read the SUM register so we know the write | 1138 | * enabled later. Read the SUM register so we know the write |
1139 | * completed. | 1139 | * completed. |
1140 | */ | 1140 | */ |
1141 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); | 1141 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0); |
1142 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); | 1142 | cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0); |
1143 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); | 1143 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0); |
1144 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); | 1144 | cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0); |
1145 | cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); | 1145 | cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2))); |
1146 | } | 1146 | } |
1147 | 1147 | ||
1148 | static void octeon_irq_init_ciu2_percpu(void) | 1148 | static void octeon_irq_init_ciu2_percpu(void) |
1149 | { | 1149 | { |
1150 | u64 regx, ipx; | 1150 | u64 regx, ipx; |
1151 | int coreid = cvmx_get_core_num(); | 1151 | int coreid = cvmx_get_core_num(); |
1152 | u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid); | 1152 | u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid); |
1153 | 1153 | ||
1154 | /* | 1154 | /* |
1155 | * Disable All CIU2 Interrupts. The ones we need will be | 1155 | * Disable All CIU2 Interrupts. The ones we need will be |
1156 | * enabled later. Read the SUM register so we know the write | 1156 | * enabled later. Read the SUM register so we know the write |
1157 | * completed. | 1157 | * completed. |
1158 | * | 1158 | * |
1159 | * There are 9 registers and 3 IPX levels with strides 0x1000 | 1159 | * There are 9 registers and 3 IPX levels with strides 0x1000 |
1160 | * and 0x200 respectively. Use loops to clear them. | 1160 | * and 0x200 respectively. Use loops to clear them. |
1161 | */ | 1161 | */ |
1162 | for (regx = 0; regx <= 0x8000; regx += 0x1000) { | 1162 | for (regx = 0; regx <= 0x8000; regx += 0x1000) { |
1163 | for (ipx = 0; ipx <= 0x400; ipx += 0x200) | 1163 | for (ipx = 0; ipx <= 0x400; ipx += 0x200) |
1164 | cvmx_write_csr(base + regx + ipx, 0); | 1164 | cvmx_write_csr(base + regx + ipx, 0); |
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid)); | 1167 | cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid)); |
1168 | } | 1168 | } |
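/*
 * Loop-bound sanity check: regx steps 0x0000..0x8000 by 0x1000, which
 * covers the 9 enable registers, and ipx steps 0x0000..0x400 by 0x200,
 * which covers the 3 IPx levels -- 27 CSR writes per core in total.
 */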
1169 | 1169 | ||
1170 | static void octeon_irq_setup_secondary_ciu(void) | 1170 | static void octeon_irq_setup_secondary_ciu(void) |
1171 | { | 1171 | { |
1172 | octeon_irq_init_ciu_percpu(); | 1172 | octeon_irq_init_ciu_percpu(); |
1173 | octeon_irq_percpu_enable(); | 1173 | octeon_irq_percpu_enable(); |
1174 | 1174 | ||
1175 | /* Enable the CIU lines */ | 1175 | /* Enable the CIU lines */ |
1176 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1176 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1177 | clear_c0_status(STATUSF_IP4); | 1177 | clear_c0_status(STATUSF_IP4); |
1178 | } | 1178 | } |
1179 | 1179 | ||
1180 | static void octeon_irq_setup_secondary_ciu2(void) | 1180 | static void octeon_irq_setup_secondary_ciu2(void) |
1181 | { | 1181 | { |
1182 | octeon_irq_init_ciu2_percpu(); | 1182 | octeon_irq_init_ciu2_percpu(); |
1183 | octeon_irq_percpu_enable(); | 1183 | octeon_irq_percpu_enable(); |
1184 | 1184 | ||
1185 | /* Enable the CIU lines */ | 1185 | /* Enable the CIU lines */ |
1186 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1186 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1187 | if (octeon_irq_use_ip4) | 1187 | if (octeon_irq_use_ip4) |
1188 | set_c0_status(STATUSF_IP4); | 1188 | set_c0_status(STATUSF_IP4); |
1189 | else | 1189 | else |
1190 | clear_c0_status(STATUSF_IP4); | 1190 | clear_c0_status(STATUSF_IP4); |
1191 | } | 1191 | } |
1192 | 1192 | ||
1193 | static void __init octeon_irq_init_ciu(void) | 1193 | static void __init octeon_irq_init_ciu(void) |
1194 | { | 1194 | { |
1195 | unsigned int i; | 1195 | unsigned int i; |
1196 | struct irq_chip *chip; | 1196 | struct irq_chip *chip; |
1197 | struct irq_chip *chip_mbox; | 1197 | struct irq_chip *chip_mbox; |
1198 | struct irq_chip *chip_wd; | 1198 | struct irq_chip *chip_wd; |
1199 | struct device_node *gpio_node; | 1199 | struct device_node *gpio_node; |
1200 | struct device_node *ciu_node; | 1200 | struct device_node *ciu_node; |
1201 | struct irq_domain *ciu_domain = NULL; | 1201 | struct irq_domain *ciu_domain = NULL; |
1202 | 1202 | ||
1203 | octeon_irq_init_ciu_percpu(); | 1203 | octeon_irq_init_ciu_percpu(); |
1204 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; | 1204 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; |
1205 | 1205 | ||
1206 | octeon_irq_ip2 = octeon_irq_ip2_ciu; | 1206 | octeon_irq_ip2 = octeon_irq_ip2_ciu; |
1207 | octeon_irq_ip3 = octeon_irq_ip3_ciu; | 1207 | octeon_irq_ip3 = octeon_irq_ip3_ciu; |
1208 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || | 1208 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || |
1209 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || | 1209 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || |
1210 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || | 1210 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || |
1211 | OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | 1211 | OCTEON_IS_MODEL(OCTEON_CN6XXX)) { |
1212 | chip = &octeon_irq_chip_ciu_v2; | 1212 | chip = &octeon_irq_chip_ciu_v2; |
1213 | chip_mbox = &octeon_irq_chip_ciu_mbox_v2; | 1213 | chip_mbox = &octeon_irq_chip_ciu_mbox_v2; |
1214 | chip_wd = &octeon_irq_chip_ciu_wd_v2; | 1214 | chip_wd = &octeon_irq_chip_ciu_wd_v2; |
1215 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; | 1215 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; |
1216 | } else { | 1216 | } else { |
1217 | chip = &octeon_irq_chip_ciu; | 1217 | chip = &octeon_irq_chip_ciu; |
1218 | chip_mbox = &octeon_irq_chip_ciu_mbox; | 1218 | chip_mbox = &octeon_irq_chip_ciu_mbox; |
1219 | chip_wd = &octeon_irq_chip_ciu_wd; | 1219 | chip_wd = &octeon_irq_chip_ciu_wd; |
1220 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; | 1220 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; |
1221 | } | 1221 | } |
1222 | octeon_irq_ciu_chip = chip; | 1222 | octeon_irq_ciu_chip = chip; |
1223 | octeon_irq_ip4 = octeon_irq_ip4_mask; | 1223 | octeon_irq_ip4 = octeon_irq_ip4_mask; |
1224 | 1224 | ||
1225 | /* Mips internal */ | 1225 | /* Mips internal */ |
1226 | octeon_irq_init_core(); | 1226 | octeon_irq_init_core(); |
1227 | 1227 | ||
1228 | gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); | 1228 | gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); |
1229 | if (gpio_node) { | 1229 | if (gpio_node) { |
1230 | struct octeon_irq_gpio_domain_data *gpiod; | 1230 | struct octeon_irq_gpio_domain_data *gpiod; |
1231 | 1231 | ||
1232 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | 1232 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); |
1233 | if (gpiod) { | 1233 | if (gpiod) { |
1234 | /* gpio domain host_data is the base hwirq number. */ | 1234 | /* gpio domain host_data is the base hwirq number. */ |
1235 | gpiod->base_hwirq = 16; | 1235 | gpiod->base_hwirq = 16; |
1236 | irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); | 1236 | irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); |
1237 | of_node_put(gpio_node); | 1237 | of_node_put(gpio_node); |
1238 | } else | 1238 | } else |
1239 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | 1239 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); |
1240 | } else | 1240 | } else |
1241 | pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); | 1241 | pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); |
1242 | 1242 | ||
1243 | ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); | 1243 | ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); |
1244 | if (ciu_node) { | 1244 | if (ciu_node) { |
1245 | ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); | 1245 | ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); |
1246 | irq_set_default_host(ciu_domain); | 1246 | irq_set_default_host(ciu_domain); |
1247 | of_node_put(ciu_node); | 1247 | of_node_put(ciu_node); |
1248 | } else | 1248 | } else |
1249 | panic("Cannot find device node for cavium,octeon-3860-ciu."); | 1249 | panic("Cannot find device node for cavium,octeon-3860-ciu."); |
1250 | 1250 | ||
1251 | /* CIU_0 */ | 1251 | /* CIU_0 */ |
1252 | for (i = 0; i < 16; i++) | 1252 | for (i = 0; i < 16; i++) |
1253 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); | 1253 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); |
1254 | 1254 | ||
1255 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); | 1255 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); |
1256 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); | 1256 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); |
1257 | 1257 | ||
1258 | for (i = 0; i < 4; i++) | 1258 | for (i = 0; i < 4; i++) |
1259 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); | 1259 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); |
1260 | for (i = 0; i < 4; i++) | 1260 | for (i = 0; i < 4; i++) |
1261 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); | 1261 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); |
1262 | 1262 | ||
1263 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); | 1263 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); |
1264 | for (i = 0; i < 4; i++) | 1264 | for (i = 0; i < 4; i++) |
1265 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); | 1265 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); |
1266 | 1266 | ||
1267 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); | 1267 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); |
1268 | 1268 | ||
1269 | /* CIU_1 */ | 1269 | /* CIU_1 */ |
1270 | for (i = 0; i < 16; i++) | 1270 | for (i = 0; i < 16; i++) |
1271 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq); | 1271 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq); |
1272 | 1272 | ||
1273 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); | 1273 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); |
1274 | 1274 | ||
1275 | /* Enable the CIU lines */ | 1275 | /* Enable the CIU lines */ |
1276 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1276 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1277 | clear_c0_status(STATUSF_IP4); | 1277 | clear_c0_status(STATUSF_IP4); |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | /* | 1280 | /* |
1281 | * Watchdog interrupts are special. They are associated with a single | 1281 | * Watchdog interrupts are special. They are associated with a single |
1282 | * core, so we hardwire the affinity to that core. | 1282 | * core, so we hardwire the affinity to that core. |
1283 | */ | 1283 | */ |
1284 | static void octeon_irq_ciu2_wd_enable(struct irq_data *data) | 1284 | static void octeon_irq_ciu2_wd_enable(struct irq_data *data) |
1285 | { | 1285 | { |
1286 | u64 mask; | 1286 | u64 mask; |
1287 | u64 en_addr; | 1287 | u64 en_addr; |
1288 | int coreid = data->irq - OCTEON_IRQ_WDOG0; | 1288 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
1289 | union octeon_ciu_chip_data cd; | 1289 | union octeon_ciu_chip_data cd; |
1290 | 1290 | ||
1291 | cd.p = irq_data_get_irq_chip_data(data); | 1291 | cd.p = irq_data_get_irq_chip_data(data); |
1292 | mask = 1ull << (cd.s.bit); | 1292 | mask = 1ull << (cd.s.bit); |
1293 | 1293 | ||
1294 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1294 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); |
1295 | cvmx_write_csr(en_addr, mask); | 1295 | cvmx_write_csr(en_addr, mask); |
1296 | 1296 | ||
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | static void octeon_irq_ciu2_enable(struct irq_data *data) | 1299 | static void octeon_irq_ciu2_enable(struct irq_data *data) |
1300 | { | 1300 | { |
1301 | u64 mask; | 1301 | u64 mask; |
1302 | u64 en_addr; | 1302 | u64 en_addr; |
1303 | int cpu = next_cpu_for_irq(data); | 1303 | int cpu = next_cpu_for_irq(data); |
1304 | int coreid = octeon_coreid_for_cpu(cpu); | 1304 | int coreid = octeon_coreid_for_cpu(cpu); |
1305 | union octeon_ciu_chip_data cd; | 1305 | union octeon_ciu_chip_data cd; |
1306 | 1306 | ||
1307 | cd.p = irq_data_get_irq_chip_data(data); | 1307 | cd.p = irq_data_get_irq_chip_data(data); |
1308 | mask = 1ull << (cd.s.bit); | 1308 | mask = 1ull << (cd.s.bit); |
1309 | 1309 | ||
1310 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1310 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); |
1311 | cvmx_write_csr(en_addr, mask); | 1311 | cvmx_write_csr(en_addr, mask); |
1312 | } | 1312 | } |
1313 | 1313 | ||
1314 | static void octeon_irq_ciu2_enable_local(struct irq_data *data) | 1314 | static void octeon_irq_ciu2_enable_local(struct irq_data *data) |
1315 | { | 1315 | { |
1316 | u64 mask; | 1316 | u64 mask; |
1317 | u64 en_addr; | 1317 | u64 en_addr; |
1318 | int coreid = cvmx_get_core_num(); | 1318 | int coreid = cvmx_get_core_num(); |
1319 | union octeon_ciu_chip_data cd; | 1319 | union octeon_ciu_chip_data cd; |
1320 | 1320 | ||
1321 | cd.p = irq_data_get_irq_chip_data(data); | 1321 | cd.p = irq_data_get_irq_chip_data(data); |
1322 | mask = 1ull << (cd.s.bit); | 1322 | mask = 1ull << (cd.s.bit); |
1323 | 1323 | ||
1324 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1324 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); |
1325 | cvmx_write_csr(en_addr, mask); | 1325 | cvmx_write_csr(en_addr, mask); |
1326 | 1326 | ||
1327 | } | 1327 | } |
1328 | 1328 | ||
1329 | static void octeon_irq_ciu2_disable_local(struct irq_data *data) | 1329 | static void octeon_irq_ciu2_disable_local(struct irq_data *data) |
1330 | { | 1330 | { |
1331 | u64 mask; | 1331 | u64 mask; |
1332 | u64 en_addr; | 1332 | u64 en_addr; |
1333 | int coreid = cvmx_get_core_num(); | 1333 | int coreid = cvmx_get_core_num(); |
1334 | union octeon_ciu_chip_data cd; | 1334 | union octeon_ciu_chip_data cd; |
1335 | 1335 | ||
1336 | cd.p = irq_data_get_irq_chip_data(data); | 1336 | cd.p = irq_data_get_irq_chip_data(data); |
1337 | mask = 1ull << (cd.s.bit); | 1337 | mask = 1ull << (cd.s.bit); |
1338 | 1338 | ||
1339 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); | 1339 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); |
1340 | cvmx_write_csr(en_addr, mask); | 1340 | cvmx_write_csr(en_addr, mask); |
1341 | 1341 | ||
1342 | } | 1342 | } |
1343 | 1343 | ||
1344 | static void octeon_irq_ciu2_ack(struct irq_data *data) | 1344 | static void octeon_irq_ciu2_ack(struct irq_data *data) |
1345 | { | 1345 | { |
1346 | u64 mask; | 1346 | u64 mask; |
1347 | u64 en_addr; | 1347 | u64 en_addr; |
1348 | int coreid = cvmx_get_core_num(); | 1348 | int coreid = cvmx_get_core_num(); |
1349 | union octeon_ciu_chip_data cd; | 1349 | union octeon_ciu_chip_data cd; |
1350 | 1350 | ||
1351 | cd.p = irq_data_get_irq_chip_data(data); | 1351 | cd.p = irq_data_get_irq_chip_data(data); |
1352 | mask = 1ull << (cd.s.bit); | 1352 | mask = 1ull << (cd.s.bit); |
1353 | 1353 | ||
1354 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); | 1354 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); |
1355 | cvmx_write_csr(en_addr, mask); | 1355 | cvmx_write_csr(en_addr, mask); |
1356 | 1356 | ||
1357 | } | 1357 | } |
1358 | 1358 | ||
1359 | static void octeon_irq_ciu2_disable_all(struct irq_data *data) | 1359 | static void octeon_irq_ciu2_disable_all(struct irq_data *data) |
1360 | { | 1360 | { |
1361 | int cpu; | 1361 | int cpu; |
1362 | u64 mask; | 1362 | u64 mask; |
1363 | union octeon_ciu_chip_data cd; | 1363 | union octeon_ciu_chip_data cd; |
1364 | 1364 | ||
1365 | cd.p = irq_data_get_irq_chip_data(data); | 1365 | cd.p = irq_data_get_irq_chip_data(data); |
1366 | mask = 1ull << (cd.s.bit); | 1366 | mask = 1ull << (cd.s.bit); |
1367 | 1367 | ||
1368 | for_each_online_cpu(cpu) { | 1368 | for_each_online_cpu(cpu) { |
1369 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1369 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); |
1370 | cvmx_write_csr(en_addr, mask); | 1370 | cvmx_write_csr(en_addr, mask); |
1371 | } | 1371 | } |
1372 | } | 1372 | } |
1373 | 1373 | ||
1374 | static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) | 1374 | static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) |
1375 | { | 1375 | { |
1376 | int cpu; | 1376 | int cpu; |
1377 | u64 mask; | 1377 | u64 mask; |
1378 | 1378 | ||
1379 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1379 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1380 | 1380 | ||
1381 | for_each_online_cpu(cpu) { | 1381 | for_each_online_cpu(cpu) { |
1382 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); | 1382 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); |
1383 | cvmx_write_csr(en_addr, mask); | 1383 | cvmx_write_csr(en_addr, mask); |
1384 | } | 1384 | } |
1385 | } | 1385 | } |
1386 | 1386 | ||
1387 | static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) | 1387 | static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) |
1388 | { | 1388 | { |
1389 | int cpu; | 1389 | int cpu; |
1390 | u64 mask; | 1390 | u64 mask; |
1391 | 1391 | ||
1392 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1392 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1393 | 1393 | ||
1394 | for_each_online_cpu(cpu) { | 1394 | for_each_online_cpu(cpu) { |
1395 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); | 1395 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); |
1396 | cvmx_write_csr(en_addr, mask); | 1396 | cvmx_write_csr(en_addr, mask); |
1397 | } | 1397 | } |
1398 | } | 1398 | } |
1399 | 1399 | ||
1400 | static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data) | 1400 | static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data) |
1401 | { | 1401 | { |
1402 | u64 mask; | 1402 | u64 mask; |
1403 | u64 en_addr; | 1403 | u64 en_addr; |
1404 | int coreid = cvmx_get_core_num(); | 1404 | int coreid = cvmx_get_core_num(); |
1405 | 1405 | ||
1406 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1406 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1407 | en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid); | 1407 | en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid); |
1408 | cvmx_write_csr(en_addr, mask); | 1408 | cvmx_write_csr(en_addr, mask); |
1409 | } | 1409 | } |
1410 | 1410 | ||
1411 | static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data) | 1411 | static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data) |
1412 | { | 1412 | { |
1413 | u64 mask; | 1413 | u64 mask; |
1414 | u64 en_addr; | 1414 | u64 en_addr; |
1415 | int coreid = cvmx_get_core_num(); | 1415 | int coreid = cvmx_get_core_num(); |
1416 | 1416 | ||
1417 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1417 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1418 | en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid); | 1418 | en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid); |
1419 | cvmx_write_csr(en_addr, mask); | 1419 | cvmx_write_csr(en_addr, mask); |
1420 | } | 1420 | } |
1421 | 1421 | ||
1422 | #ifdef CONFIG_SMP | 1422 | #ifdef CONFIG_SMP |
1423 | static int octeon_irq_ciu2_set_affinity(struct irq_data *data, | 1423 | static int octeon_irq_ciu2_set_affinity(struct irq_data *data, |
1424 | const struct cpumask *dest, bool force) | 1424 | const struct cpumask *dest, bool force) |
1425 | { | 1425 | { |
1426 | int cpu; | 1426 | int cpu; |
1427 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 1427 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
1428 | u64 mask; | 1428 | u64 mask; |
1429 | union octeon_ciu_chip_data cd; | 1429 | union octeon_ciu_chip_data cd; |
1430 | 1430 | ||
1431 | if (!enable_one) | 1431 | if (!enable_one) |
1432 | return 0; | 1432 | return 0; |
1433 | 1433 | ||
1434 | cd.p = irq_data_get_irq_chip_data(data); | 1434 | cd.p = irq_data_get_irq_chip_data(data); |
1435 | mask = 1ull << cd.s.bit; | 1435 | mask = 1ull << cd.s.bit; |
1436 | 1436 | ||
1437 | for_each_online_cpu(cpu) { | 1437 | for_each_online_cpu(cpu) { |
1438 | u64 en_addr; | 1438 | u64 en_addr; |
1439 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 1439 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
1440 | enable_one = false; | 1440 | enable_one = false; |
1441 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1441 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); |
1442 | } else { | 1442 | } else { |
1443 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1443 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); |
1444 | } | 1444 | } |
1445 | cvmx_write_csr(en_addr, mask); | 1445 | cvmx_write_csr(en_addr, mask); |
1446 | } | 1446 | } |
1447 | 1447 | ||
1448 | return 0; | 1448 | return 0; |
1449 | } | 1449 | } |
1450 | #endif | 1450 | #endif |
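/*
 * The enable_one dance above keeps a CIU2 interrupt enabled on exactly
 * one CPU: the first online CPU found in the destination mask gets a
 * write to the W1S (write-one-to-set) enable address, and every other
 * CPU gets the corresponding W1C (write-one-to-clear) address, so the
 * source is steered to a single core rather than broadcast.
 */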
1451 | 1451 | ||
1452 | static void octeon_irq_ciu2_enable_gpio(struct irq_data *data) | 1452 | static void octeon_irq_ciu2_enable_gpio(struct irq_data *data) |
1453 | { | 1453 | { |
1454 | octeon_irq_gpio_setup(data); | 1454 | octeon_irq_gpio_setup(data); |
1455 | octeon_irq_ciu2_enable(data); | 1455 | octeon_irq_ciu2_enable(data); |
1456 | } | 1456 | } |
1457 | 1457 | ||
1458 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) | 1458 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) |
1459 | { | 1459 | { |
1460 | union octeon_ciu_chip_data cd; | 1460 | union octeon_ciu_chip_data cd; |
1461 | cd.p = irq_data_get_irq_chip_data(data); | 1461 | cd.p = irq_data_get_irq_chip_data(data); |
1462 | 1462 | ||
1463 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 1463 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); |
1464 | 1464 | ||
1465 | octeon_irq_ciu2_disable_all(data); | 1465 | octeon_irq_ciu2_disable_all(data); |
1466 | } | 1466 | } |
1467 | 1467 | ||
1468 | static struct irq_chip octeon_irq_chip_ciu2 = { | 1468 | static struct irq_chip octeon_irq_chip_ciu2 = { |
1469 | .name = "CIU2-E", | 1469 | .name = "CIU2-E", |
1470 | .irq_enable = octeon_irq_ciu2_enable, | 1470 | .irq_enable = octeon_irq_ciu2_enable, |
1471 | .irq_disable = octeon_irq_ciu2_disable_all, | 1471 | .irq_disable = octeon_irq_ciu2_disable_all, |
1472 | .irq_ack = octeon_irq_ciu2_ack, | 1472 | .irq_ack = octeon_irq_ciu2_ack, |
1473 | .irq_mask = octeon_irq_ciu2_disable_local, | 1473 | .irq_mask = octeon_irq_ciu2_disable_local, |
1474 | .irq_unmask = octeon_irq_ciu2_enable, | 1474 | .irq_unmask = octeon_irq_ciu2_enable, |
1475 | #ifdef CONFIG_SMP | 1475 | #ifdef CONFIG_SMP |
1476 | .irq_set_affinity = octeon_irq_ciu2_set_affinity, | 1476 | .irq_set_affinity = octeon_irq_ciu2_set_affinity, |
1477 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | 1477 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
1478 | #endif | 1478 | #endif |
1479 | }; | 1479 | }; |
1480 | 1480 | ||
1481 | static struct irq_chip octeon_irq_chip_ciu2_mbox = { | 1481 | static struct irq_chip octeon_irq_chip_ciu2_mbox = { |
1482 | .name = "CIU2-M", | 1482 | .name = "CIU2-M", |
1483 | .irq_enable = octeon_irq_ciu2_mbox_enable_all, | 1483 | .irq_enable = octeon_irq_ciu2_mbox_enable_all, |
1484 | .irq_disable = octeon_irq_ciu2_mbox_disable_all, | 1484 | .irq_disable = octeon_irq_ciu2_mbox_disable_all, |
1485 | .irq_ack = octeon_irq_ciu2_mbox_disable_local, | 1485 | .irq_ack = octeon_irq_ciu2_mbox_disable_local, |
1486 | .irq_eoi = octeon_irq_ciu2_mbox_enable_local, | 1486 | .irq_eoi = octeon_irq_ciu2_mbox_enable_local, |
1487 | 1487 | ||
1488 | .irq_cpu_online = octeon_irq_ciu2_mbox_enable_local, | 1488 | .irq_cpu_online = octeon_irq_ciu2_mbox_enable_local, |
1489 | .irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local, | 1489 | .irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local, |
1490 | .flags = IRQCHIP_ONOFFLINE_ENABLED, | 1490 | .flags = IRQCHIP_ONOFFLINE_ENABLED, |
1491 | }; | 1491 | }; |
1492 | 1492 | ||
1493 | static struct irq_chip octeon_irq_chip_ciu2_wd = { | 1493 | static struct irq_chip octeon_irq_chip_ciu2_wd = { |
1494 | .name = "CIU2-W", | 1494 | .name = "CIU2-W", |
1495 | .irq_enable = octeon_irq_ciu2_wd_enable, | 1495 | .irq_enable = octeon_irq_ciu2_wd_enable, |
1496 | .irq_disable = octeon_irq_ciu2_disable_all, | 1496 | .irq_disable = octeon_irq_ciu2_disable_all, |
1497 | .irq_mask = octeon_irq_ciu2_disable_local, | 1497 | .irq_mask = octeon_irq_ciu2_disable_local, |
1498 | .irq_unmask = octeon_irq_ciu2_enable_local, | 1498 | .irq_unmask = octeon_irq_ciu2_enable_local, |
1499 | }; | 1499 | }; |
1500 | 1500 | ||
1501 | static struct irq_chip octeon_irq_chip_ciu2_gpio = { | 1501 | static struct irq_chip octeon_irq_chip_ciu2_gpio = { |
1502 | .name = "CIU-GPIO", | 1502 | .name = "CIU-GPIO", |
1503 | .irq_enable = octeon_irq_ciu2_enable_gpio, | 1503 | .irq_enable = octeon_irq_ciu2_enable_gpio, |
1504 | .irq_disable = octeon_irq_ciu2_disable_gpio, | 1504 | .irq_disable = octeon_irq_ciu2_disable_gpio, |
1505 | .irq_ack = octeon_irq_ciu_gpio_ack, | 1505 | .irq_ack = octeon_irq_ciu_gpio_ack, |
1506 | .irq_mask = octeon_irq_ciu2_disable_local, | 1506 | .irq_mask = octeon_irq_ciu2_disable_local, |
1507 | .irq_unmask = octeon_irq_ciu2_enable, | 1507 | .irq_unmask = octeon_irq_ciu2_enable, |
1508 | .irq_set_type = octeon_irq_ciu_gpio_set_type, | 1508 | .irq_set_type = octeon_irq_ciu_gpio_set_type, |
1509 | #ifdef CONFIG_SMP | 1509 | #ifdef CONFIG_SMP |
1510 | .irq_set_affinity = octeon_irq_ciu2_set_affinity, | 1510 | .irq_set_affinity = octeon_irq_ciu2_set_affinity, |
1511 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | 1511 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, |
1512 | #endif | 1512 | #endif |
1513 | .flags = IRQCHIP_SET_TYPE_MASKED, | 1513 | .flags = IRQCHIP_SET_TYPE_MASKED, |
1514 | }; | 1514 | }; |
1515 | 1515 | ||
1516 | static int octeon_irq_ciu2_xlat(struct irq_domain *d, | 1516 | static int octeon_irq_ciu2_xlat(struct irq_domain *d, |
1517 | struct device_node *node, | 1517 | struct device_node *node, |
1518 | const u32 *intspec, | 1518 | const u32 *intspec, |
1519 | unsigned int intsize, | 1519 | unsigned int intsize, |
1520 | unsigned long *out_hwirq, | 1520 | unsigned long *out_hwirq, |
1521 | unsigned int *out_type) | 1521 | unsigned int *out_type) |
1522 | { | 1522 | { |
1523 | unsigned int ciu, bit; | 1523 | unsigned int ciu, bit; |
1524 | 1524 | ||
1525 | ciu = intspec[0]; | 1525 | ciu = intspec[0]; |
1526 | bit = intspec[1]; | 1526 | bit = intspec[1]; |
1527 | 1527 | ||
1528 | *out_hwirq = (ciu << 6) | bit; | 1528 | *out_hwirq = (ciu << 6) | bit; |
1529 | *out_type = 0; | 1529 | *out_type = 0; |
1530 | 1530 | ||
1531 | return 0; | 1531 | return 0; |
1532 | } | 1532 | } |
1533 | 1533 | ||
1534 | static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit) | 1534 | static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit) |
1535 | { | 1535 | { |
1536 | bool edge = false; | 1536 | bool edge = false; |
1537 | 1537 | ||
1538 | if (line == 3) /* MIO */ | 1538 | if (line == 3) /* MIO */ |
1539 | switch (bit) { | 1539 | switch (bit) { |
1540 | case 2: /* IPD_DRP */ | 1540 | case 2: /* IPD_DRP */ |
1541 | case 8 ... 11: /* Timers */ | 1541 | case 8 ... 11: /* Timers */ |
1542 | case 48: /* PTP */ | 1542 | case 48: /* PTP */ |
1543 | edge = true; | 1543 | edge = true; |
1544 | break; | 1544 | break; |
1545 | default: | 1545 | default: |
1546 | break; | 1546 | break; |
1547 | } | 1547 | } |
1548 | else if (line == 6) /* PKT */ | 1548 | else if (line == 6) /* PKT */ |
1549 | switch (bit) { | 1549 | switch (bit) { |
1550 | case 52 ... 53: /* ILK_DRP */ | 1550 | case 52 ... 53: /* ILK_DRP */ |
1551 | case 8 ... 12: /* GMX_DRP */ | 1551 | case 8 ... 12: /* GMX_DRP */ |
1552 | edge = true; | 1552 | edge = true; |
1553 | break; | 1553 | break; |
1554 | default: | 1554 | default: |
1555 | break; | 1555 | break; |
1556 | } | 1556 | } |
1557 | return edge; | 1557 | return edge; |
1558 | } | 1558 | } |
1559 | 1559 | ||
1560 | static int octeon_irq_ciu2_map(struct irq_domain *d, | 1560 | static int octeon_irq_ciu2_map(struct irq_domain *d, |
1561 | unsigned int virq, irq_hw_number_t hw) | 1561 | unsigned int virq, irq_hw_number_t hw) |
1562 | { | 1562 | { |
1563 | unsigned int line = hw >> 6; | 1563 | unsigned int line = hw >> 6; |
1564 | unsigned int bit = hw & 63; | 1564 | unsigned int bit = hw & 63; |
1565 | 1565 | ||
1566 | if (!octeon_irq_virq_in_range(virq)) | 1566 | if (!octeon_irq_virq_in_range(virq)) |
1567 | return -EINVAL; | 1567 | return -EINVAL; |
1568 | 1568 | ||
1569 | /* | 1569 | /* |
1570 | * Don't map irq if it is reserved for GPIO. | 1570 | * Don't map irq if it is reserved for GPIO. |
1571 | * (Line 7 is reserved for the GPIO lines.) | 1571 | * (Line 7 is reserved for the GPIO lines.) |
1572 | */ | 1572 | */ |
1573 | if (line == 7) | 1573 | if (line == 7) |
1574 | return 0; | 1574 | return 0; |
1575 | 1575 | ||
1576 | if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0) | 1576 | if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0) |
1577 | return -EINVAL; | 1577 | return -EINVAL; |
1578 | 1578 | ||
1579 | if (octeon_irq_ciu2_is_edge(line, bit)) | 1579 | if (octeon_irq_ciu2_is_edge(line, bit)) |
1580 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1580 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1581 | &octeon_irq_chip_ciu2, | 1581 | &octeon_irq_chip_ciu2, |
1582 | handle_edge_irq); | 1582 | handle_edge_irq); |
1583 | else | 1583 | else |
1584 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1584 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1585 | &octeon_irq_chip_ciu2, | 1585 | &octeon_irq_chip_ciu2, |
1586 | handle_level_irq); | 1586 | handle_level_irq); |
1587 | 1587 | ||
1588 | return 0; | 1588 | return 0; |
1589 | } | 1589 | } |
1590 | static int octeon_irq_ciu2_gpio_map(struct irq_domain *d, | 1590 | static int octeon_irq_ciu2_gpio_map(struct irq_domain *d, |
1591 | unsigned int virq, irq_hw_number_t hw) | 1591 | unsigned int virq, irq_hw_number_t hw) |
1592 | { | 1592 | { |
1593 | return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio); | 1593 | return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio); |
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { | 1596 | static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { |
1597 | .map = octeon_irq_ciu2_map, | 1597 | .map = octeon_irq_ciu2_map, |
1598 | .xlate = octeon_irq_ciu2_xlat, | 1598 | .xlate = octeon_irq_ciu2_xlat, |
1599 | }; | 1599 | }; |
1600 | 1600 | ||
1601 | static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = { | 1601 | static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = { |
1602 | .map = octeon_irq_ciu2_gpio_map, | 1602 | .map = octeon_irq_ciu2_gpio_map, |
1603 | .xlate = octeon_irq_gpio_xlat, | 1603 | .xlate = octeon_irq_gpio_xlat, |
1604 | }; | 1604 | }; |
1605 | 1605 | ||
1606 | static void octeon_irq_ciu2(void) | 1606 | static void octeon_irq_ciu2(void) |
1607 | { | 1607 | { |
1608 | int line; | 1608 | int line; |
1609 | int bit; | 1609 | int bit; |
1610 | int irq; | 1610 | int irq; |
1611 | u64 src_reg, src, sum; | 1611 | u64 src_reg, src, sum; |
1612 | const unsigned long core_id = cvmx_get_core_num(); | 1612 | const unsigned long core_id = cvmx_get_core_num(); |
1613 | 1613 | ||
1614 | sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful; | 1614 | sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful; |
1615 | 1615 | ||
1616 | if (unlikely(!sum)) | 1616 | if (unlikely(!sum)) |
1617 | goto spurious; | 1617 | goto spurious; |
1618 | 1618 | ||
1619 | line = fls64(sum) - 1; | 1619 | line = fls64(sum) - 1; |
1620 | src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line); | 1620 | src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line); |
1621 | src = cvmx_read_csr(src_reg); | 1621 | src = cvmx_read_csr(src_reg); |
1622 | 1622 | ||
1623 | if (unlikely(!src)) | 1623 | if (unlikely(!src)) |
1624 | goto spurious; | 1624 | goto spurious; |
1625 | 1625 | ||
1626 | bit = fls64(src) - 1; | 1626 | bit = fls64(src) - 1; |
1627 | irq = octeon_irq_ciu_to_irq[line][bit]; | 1627 | irq = octeon_irq_ciu_to_irq[line][bit]; |
1628 | if (unlikely(!irq)) | 1628 | if (unlikely(!irq)) |
1629 | goto spurious; | 1629 | goto spurious; |
1630 | 1630 | ||
1631 | do_IRQ(irq); | 1631 | do_IRQ(irq); |
1632 | goto out; | 1632 | goto out; |
1633 | 1633 | ||
1634 | spurious: | 1634 | spurious: |
1635 | spurious_interrupt(); | 1635 | spurious_interrupt(); |
1636 | out: | 1636 | out: |
1637 | /* CN68XX pass 1.x has an erratum whereby accessing the ACK registers | 1637 | /* CN68XX pass 1.x has an erratum whereby accessing the ACK registers |
1638 | can stop interrupts from propagating */ | 1638 | can stop interrupts from propagating */ |
1639 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) | 1639 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) |
1640 | cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY); | 1640 | cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY); |
1641 | else | 1641 | else |
1642 | cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id)); | 1642 | cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id)); |
1643 | return; | 1643 | return; |
1644 | } | 1644 | } |
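/*
 * CIU2 dispatch is two-staged: the per-core SUM register gives the
 * pending line (0-7), then the per-line SRC register, at stride 0x1000,
 * gives the bit within that line.  The resulting (line, bit) pair
 * indexes the same octeon_irq_ciu_to_irq[][] table the map functions
 * fill in.
 */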
1645 | 1645 | ||
1646 | static void octeon_irq_ciu2_mbox(void) | 1646 | static void octeon_irq_ciu2_mbox(void) |
1647 | { | 1647 | { |
1648 | int line; | 1648 | int line; |
1649 | 1649 | ||
1650 | const unsigned long core_id = cvmx_get_core_num(); | 1650 | const unsigned long core_id = cvmx_get_core_num(); |
1651 | u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60; | 1651 | u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60; |
1652 | 1652 | ||
1653 | if (unlikely(!sum)) | 1653 | if (unlikely(!sum)) |
1654 | goto spurious; | 1654 | goto spurious; |
1655 | 1655 | ||
1656 | line = fls64(sum) - 1; | 1656 | line = fls64(sum) - 1; |
1657 | 1657 | ||
1658 | do_IRQ(OCTEON_IRQ_MBOX0 + line); | 1658 | do_IRQ(OCTEON_IRQ_MBOX0 + line); |
1659 | goto out; | 1659 | goto out; |
1660 | 1660 | ||
1661 | spurious: | 1661 | spurious: |
1662 | spurious_interrupt(); | 1662 | spurious_interrupt(); |
1663 | out: | 1663 | out: |
1664 | /* CN68XX pass 1.x has an erratum whereby accessing the ACK registers | 1664 | /* CN68XX pass 1.x has an erratum whereby accessing the ACK registers |
1665 | can stop interrupts from propagating */ | 1665 | can stop interrupts from propagating */ |
1666 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) | 1666 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) |
1667 | cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY); | 1667 | cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY); |
1668 | else | 1668 | else |
1669 | cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id)); | 1669 | cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id)); |
1670 | return; | 1670 | return; |
1671 | } | 1671 | } |
1672 | 1672 | ||
1673 | static void __init octeon_irq_init_ciu2(void) | 1673 | static void __init octeon_irq_init_ciu2(void) |
1674 | { | 1674 | { |
1675 | unsigned int i; | 1675 | unsigned int i; |
1676 | struct device_node *gpio_node; | 1676 | struct device_node *gpio_node; |
1677 | struct device_node *ciu_node; | 1677 | struct device_node *ciu_node; |
1678 | struct irq_domain *ciu_domain = NULL; | 1678 | struct irq_domain *ciu_domain = NULL; |
1679 | 1679 | ||
1680 | octeon_irq_init_ciu2_percpu(); | 1680 | octeon_irq_init_ciu2_percpu(); |
1681 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; | 1681 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; |
1682 | 1682 | ||
1683 | octeon_irq_ip2 = octeon_irq_ciu2; | 1683 | octeon_irq_ip2 = octeon_irq_ciu2; |
1684 | octeon_irq_ip3 = octeon_irq_ciu2_mbox; | 1684 | octeon_irq_ip3 = octeon_irq_ciu2_mbox; |
1685 | octeon_irq_ip4 = octeon_irq_ip4_mask; | 1685 | octeon_irq_ip4 = octeon_irq_ip4_mask; |
1686 | 1686 | ||
1687 | /* Mips internal */ | 1687 | /* Mips internal */ |
1688 | octeon_irq_init_core(); | 1688 | octeon_irq_init_core(); |
1689 | 1689 | ||
1690 | gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); | 1690 | gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); |
1691 | if (gpio_node) { | 1691 | if (gpio_node) { |
1692 | struct octeon_irq_gpio_domain_data *gpiod; | 1692 | struct octeon_irq_gpio_domain_data *gpiod; |
1693 | 1693 | ||
1694 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | 1694 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); |
1695 | if (gpiod) { | 1695 | if (gpiod) { |
1696 | /* gpio domain host_data is the base hwirq number. */ | 1696 | /* gpio domain host_data is the base hwirq number. */ |
1697 | gpiod->base_hwirq = 7 << 6; | 1697 | gpiod->base_hwirq = 7 << 6; |
1698 | irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod); | 1698 | irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod); |
1699 | of_node_put(gpio_node); | 1699 | of_node_put(gpio_node); |
1700 | } else | 1700 | } else |
1701 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | 1701 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); |
1702 | } else | 1702 | } else |
1703 | pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); | 1703 | pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); |
1704 | 1704 | ||
1705 | ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2"); | 1705 | ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2"); |
1706 | if (ciu_node) { | 1706 | if (ciu_node) { |
1707 | ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL); | 1707 | ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL); |
1708 | irq_set_default_host(ciu_domain); | 1708 | irq_set_default_host(ciu_domain); |
1709 | of_node_put(ciu_node); | 1709 | of_node_put(ciu_node); |
1710 | } else | 1710 | } else |
1711 | panic("Cannot find device node for cavium,octeon-6880-ciu2."); | 1711 | panic("Cannot find device node for cavium,octeon-6880-ciu2."); |
1712 | 1712 | ||
1713 | /* CIU2 */ | 1713 | /* CIU2 */ |
1714 | for (i = 0; i < 64; i++) | 1714 | for (i = 0; i < 64; i++) |
1715 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); | 1715 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); |
1716 | 1716 | ||
1717 | for (i = 0; i < 32; i++) | 1717 | for (i = 0; i < 32; i++) |
1718 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, | 1718 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, |
1719 | &octeon_irq_chip_ciu2_wd, handle_level_irq); | 1719 | &octeon_irq_chip_ciu2_wd, handle_level_irq); |
1720 | 1720 | ||
1721 | for (i = 0; i < 4; i++) | 1721 | for (i = 0; i < 4; i++) |
1722 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); | 1722 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); |
1723 | 1723 | ||
1724 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); | 1724 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); |
1725 | 1725 | ||
1726 | for (i = 0; i < 4; i++) | 1726 | for (i = 0; i < 4; i++) |
1727 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); | 1727 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); |
1728 | 1728 | ||
1729 | for (i = 0; i < 4; i++) | 1729 | for (i = 0; i < 4; i++) |
1730 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); | 1730 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); |
1731 | 1731 | ||
1732 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); | 1732 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
1733 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); | 1733 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
1734 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); | 1734 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
1735 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); | 1735 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
1736 | 1736 | ||
1737 | /* Enable the CIU lines */ | 1737 | /* Enable the CIU lines */ |
1738 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1738 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1739 | clear_c0_status(STATUSF_IP4); | 1739 | clear_c0_status(STATUSF_IP4); |
1740 | } | 1740 | } |
1741 | 1741 | ||
1742 | void __init arch_init_irq(void) | 1742 | void __init arch_init_irq(void) |
1743 | { | 1743 | { |
1744 | #ifdef CONFIG_SMP | 1744 | #ifdef CONFIG_SMP |
1745 | /* Set the default affinity to the boot cpu. */ | 1745 | /* Set the default affinity to the boot cpu. */ |
1746 | cpumask_clear(irq_default_affinity); | 1746 | cpumask_clear(irq_default_affinity); |
1747 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); | 1747 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); |
1748 | #endif | 1748 | #endif |
1749 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) | 1749 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) |
1750 | octeon_irq_init_ciu2(); | 1750 | octeon_irq_init_ciu2(); |
1751 | else | 1751 | else |
1752 | octeon_irq_init_ciu(); | 1752 | octeon_irq_init_ciu(); |
1753 | } | 1753 | } |
1754 | 1754 | ||
1755 | asmlinkage void plat_irq_dispatch(void) | 1755 | asmlinkage void plat_irq_dispatch(void) |
1756 | { | 1756 | { |
1757 | unsigned long cop0_cause; | 1757 | unsigned long cop0_cause; |
1758 | unsigned long cop0_status; | 1758 | unsigned long cop0_status; |
1759 | 1759 | ||
1760 | while (1) { | 1760 | while (1) { |
1761 | cop0_cause = read_c0_cause(); | 1761 | cop0_cause = read_c0_cause(); |
1762 | cop0_status = read_c0_status(); | 1762 | cop0_status = read_c0_status(); |
1763 | cop0_cause &= cop0_status; | 1763 | cop0_cause &= cop0_status; |
1764 | cop0_cause &= ST0_IM; | 1764 | cop0_cause &= ST0_IM; |
1765 | 1765 | ||
1766 | if (unlikely(cop0_cause & STATUSF_IP2)) | 1766 | if (unlikely(cop0_cause & STATUSF_IP2)) |
1767 | octeon_irq_ip2(); | 1767 | octeon_irq_ip2(); |
1768 | else if (unlikely(cop0_cause & STATUSF_IP3)) | 1768 | else if (unlikely(cop0_cause & STATUSF_IP3)) |
1769 | octeon_irq_ip3(); | 1769 | octeon_irq_ip3(); |
1770 | else if (unlikely(cop0_cause & STATUSF_IP4)) | 1770 | else if (unlikely(cop0_cause & STATUSF_IP4)) |
1771 | octeon_irq_ip4(); | 1771 | octeon_irq_ip4(); |
1772 | else if (likely(cop0_cause)) | 1772 | else if (likely(cop0_cause)) |
1773 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); | 1773 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); |
1774 | else | 1774 | else |
1775 | break; | 1775 | break; |
1776 | } | 1776 | } |
1777 | } | 1777 | } |
1778 | 1778 | ||
1779 | #ifdef CONFIG_HOTPLUG_CPU | 1779 | #ifdef CONFIG_HOTPLUG_CPU |
1780 | 1780 | ||
1781 | void octeon_fixup_irqs(void) | 1781 | void octeon_fixup_irqs(void) |
1782 | { | 1782 | { |
1783 | irq_cpu_offline(); | 1783 | irq_cpu_offline(); |
1784 | } | 1784 | } |
1785 | 1785 | ||
1786 | #endif /* CONFIG_HOTPLUG_CPU */ | 1786 | #endif /* CONFIG_HOTPLUG_CPU */ |
1787 | 1787 |
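
The dispatch loop in plat_irq_dispatch() above leans on the MIPS Cause/Status bit layout: pending interrupt IPn sits at bit n + 8, so fls() on the masked cause word returns n + 9 and fls(cop0_cause) - 9 recovers n, giving IRQ MIPS_CPU_IRQ_BASE + n. A minimal standalone sketch of that arithmetic (fls() is reimplemented and the IRQ base value is assumed, purely for the demo):

    /* Standalone demo of the IP-bit-to-IRQ mapping used by
     * plat_irq_dispatch(); not kernel code.
     */
    #include <stdio.h>

    static int fls(unsigned long x)	/* 1-based index of the highest set bit */
    {
    	int r = 0;

    	while (x) {
    		x >>= 1;
    		r++;
    	}
    	return r;
    }

    int main(void)
    {
    	const int MIPS_CPU_IRQ_BASE = 0;	/* assumed base, demo only */
    	unsigned long cop0_cause = 1UL << 15;	/* IP7 pending: Cause bit 15 */

    	/* fls(1 << 15) == 16 and 16 - 9 == 7, i.e. IRQ base + 7 for IP7 */
    	printf("irq = %d\n", fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
    	return 0;
    }
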
drivers/clocksource/exynos_mct.c
1 | /* linux/arch/arm/mach-exynos4/mct.c | 1 | /* linux/arch/arm/mach-exynos4/mct.c |
2 | * | 2 | * |
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | 3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. |
4 | * http://www.samsung.com | 4 | * http://www.samsung.com |
5 | * | 5 | * |
6 | * EXYNOS4 MCT (Multi-Core Timer) support | 6 | * EXYNOS4 MCT (Multi-Core Timer) support |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
18 | #include <linux/clockchips.h> | 18 | #include <linux/clockchips.h> |
19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/percpu.h> | 22 | #include <linux/percpu.h> |
23 | #include <linux/of.h> | 23 | #include <linux/of.h> |
24 | #include <linux/of_irq.h> | 24 | #include <linux/of_irq.h> |
25 | #include <linux/of_address.h> | 25 | #include <linux/of_address.h> |
26 | #include <linux/clocksource.h> | 26 | #include <linux/clocksource.h> |
27 | 27 | ||
28 | #define EXYNOS4_MCTREG(x) (x) | 28 | #define EXYNOS4_MCTREG(x) (x) |
29 | #define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100) | 29 | #define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100) |
30 | #define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104) | 30 | #define EXYNOS4_MCT_G_CNT_U EXYNOS4_MCTREG(0x104) |
31 | #define EXYNOS4_MCT_G_CNT_WSTAT EXYNOS4_MCTREG(0x110) | 31 | #define EXYNOS4_MCT_G_CNT_WSTAT EXYNOS4_MCTREG(0x110) |
32 | #define EXYNOS4_MCT_G_COMP0_L EXYNOS4_MCTREG(0x200) | 32 | #define EXYNOS4_MCT_G_COMP0_L EXYNOS4_MCTREG(0x200) |
33 | #define EXYNOS4_MCT_G_COMP0_U EXYNOS4_MCTREG(0x204) | 33 | #define EXYNOS4_MCT_G_COMP0_U EXYNOS4_MCTREG(0x204) |
34 | #define EXYNOS4_MCT_G_COMP0_ADD_INCR EXYNOS4_MCTREG(0x208) | 34 | #define EXYNOS4_MCT_G_COMP0_ADD_INCR EXYNOS4_MCTREG(0x208) |
35 | #define EXYNOS4_MCT_G_TCON EXYNOS4_MCTREG(0x240) | 35 | #define EXYNOS4_MCT_G_TCON EXYNOS4_MCTREG(0x240) |
36 | #define EXYNOS4_MCT_G_INT_CSTAT EXYNOS4_MCTREG(0x244) | 36 | #define EXYNOS4_MCT_G_INT_CSTAT EXYNOS4_MCTREG(0x244) |
37 | #define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248) | 37 | #define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248) |
38 | #define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C) | 38 | #define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C) |
39 | #define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300) | 39 | #define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300) |
40 | #define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x)) | 40 | #define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x)) |
41 | #define EXYNOS4_MCT_L_MASK (0xffffff00) | 41 | #define EXYNOS4_MCT_L_MASK (0xffffff00) |
42 | 42 | ||
43 | #define MCT_L_TCNTB_OFFSET (0x00) | 43 | #define MCT_L_TCNTB_OFFSET (0x00) |
44 | #define MCT_L_ICNTB_OFFSET (0x08) | 44 | #define MCT_L_ICNTB_OFFSET (0x08) |
45 | #define MCT_L_TCON_OFFSET (0x20) | 45 | #define MCT_L_TCON_OFFSET (0x20) |
46 | #define MCT_L_INT_CSTAT_OFFSET (0x30) | 46 | #define MCT_L_INT_CSTAT_OFFSET (0x30) |
47 | #define MCT_L_INT_ENB_OFFSET (0x34) | 47 | #define MCT_L_INT_ENB_OFFSET (0x34) |
48 | #define MCT_L_WSTAT_OFFSET (0x40) | 48 | #define MCT_L_WSTAT_OFFSET (0x40) |
49 | #define MCT_G_TCON_START (1 << 8) | 49 | #define MCT_G_TCON_START (1 << 8) |
50 | #define MCT_G_TCON_COMP0_AUTO_INC (1 << 1) | 50 | #define MCT_G_TCON_COMP0_AUTO_INC (1 << 1) |
51 | #define MCT_G_TCON_COMP0_ENABLE (1 << 0) | 51 | #define MCT_G_TCON_COMP0_ENABLE (1 << 0) |
52 | #define MCT_L_TCON_INTERVAL_MODE (1 << 2) | 52 | #define MCT_L_TCON_INTERVAL_MODE (1 << 2) |
53 | #define MCT_L_TCON_INT_START (1 << 1) | 53 | #define MCT_L_TCON_INT_START (1 << 1) |
54 | #define MCT_L_TCON_TIMER_START (1 << 0) | 54 | #define MCT_L_TCON_TIMER_START (1 << 0) |
55 | 55 | ||
56 | #define TICK_BASE_CNT 1 | 56 | #define TICK_BASE_CNT 1 |
57 | 57 | ||
58 | enum { | 58 | enum { |
59 | MCT_INT_SPI, | 59 | MCT_INT_SPI, |
60 | MCT_INT_PPI | 60 | MCT_INT_PPI |
61 | }; | 61 | }; |
62 | 62 | ||
63 | enum { | 63 | enum { |
64 | MCT_G0_IRQ, | 64 | MCT_G0_IRQ, |
65 | MCT_G1_IRQ, | 65 | MCT_G1_IRQ, |
66 | MCT_G2_IRQ, | 66 | MCT_G2_IRQ, |
67 | MCT_G3_IRQ, | 67 | MCT_G3_IRQ, |
68 | MCT_L0_IRQ, | 68 | MCT_L0_IRQ, |
69 | MCT_L1_IRQ, | 69 | MCT_L1_IRQ, |
70 | MCT_L2_IRQ, | 70 | MCT_L2_IRQ, |
71 | MCT_L3_IRQ, | 71 | MCT_L3_IRQ, |
72 | MCT_L4_IRQ, | 72 | MCT_L4_IRQ, |
73 | MCT_L5_IRQ, | 73 | MCT_L5_IRQ, |
74 | MCT_L6_IRQ, | 74 | MCT_L6_IRQ, |
75 | MCT_L7_IRQ, | 75 | MCT_L7_IRQ, |
76 | MCT_NR_IRQS, | 76 | MCT_NR_IRQS, |
77 | }; | 77 | }; |
78 | 78 | ||
79 | static void __iomem *reg_base; | 79 | static void __iomem *reg_base; |
80 | static unsigned long clk_rate; | 80 | static unsigned long clk_rate; |
81 | static unsigned int mct_int_type; | 81 | static unsigned int mct_int_type; |
82 | static int mct_irqs[MCT_NR_IRQS]; | 82 | static int mct_irqs[MCT_NR_IRQS]; |
83 | 83 | ||
84 | struct mct_clock_event_device { | 84 | struct mct_clock_event_device { |
85 | struct clock_event_device evt; | 85 | struct clock_event_device evt; |
86 | unsigned long base; | 86 | unsigned long base; |
87 | char name[10]; | 87 | char name[10]; |
88 | }; | 88 | }; |
89 | 89 | ||
90 | static void exynos4_mct_write(unsigned int value, unsigned long offset) | 90 | static void exynos4_mct_write(unsigned int value, unsigned long offset) |
91 | { | 91 | { |
92 | unsigned long stat_addr; | 92 | unsigned long stat_addr; |
93 | u32 mask; | 93 | u32 mask; |
94 | u32 i; | 94 | u32 i; |
95 | 95 | ||
96 | __raw_writel(value, reg_base + offset); | 96 | __raw_writel(value, reg_base + offset); |
97 | 97 | ||
98 | if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { | 98 | if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { |
99 | stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; | 99 | stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; |
100 | switch (offset & EXYNOS4_MCT_L_MASK) { | 100 | switch (offset & EXYNOS4_MCT_L_MASK) { |
101 | case MCT_L_TCON_OFFSET: | 101 | case MCT_L_TCON_OFFSET: |
102 | mask = 1 << 3; /* L_TCON write status */ | 102 | mask = 1 << 3; /* L_TCON write status */ |
103 | break; | 103 | break; |
104 | case MCT_L_ICNTB_OFFSET: | 104 | case MCT_L_ICNTB_OFFSET: |
105 | mask = 1 << 1; /* L_ICNTB write status */ | 105 | mask = 1 << 1; /* L_ICNTB write status */ |
106 | break; | 106 | break; |
107 | case MCT_L_TCNTB_OFFSET: | 107 | case MCT_L_TCNTB_OFFSET: |
108 | mask = 1 << 0; /* L_TCNTB write status */ | 108 | mask = 1 << 0; /* L_TCNTB write status */ |
109 | break; | 109 | break; |
110 | default: | 110 | default: |
111 | return; | 111 | return; |
112 | } | 112 | } |
113 | } else { | 113 | } else { |
114 | switch (offset) { | 114 | switch (offset) { |
115 | case EXYNOS4_MCT_G_TCON: | 115 | case EXYNOS4_MCT_G_TCON: |
116 | stat_addr = EXYNOS4_MCT_G_WSTAT; | 116 | stat_addr = EXYNOS4_MCT_G_WSTAT; |
117 | mask = 1 << 16; /* G_TCON write status */ | 117 | mask = 1 << 16; /* G_TCON write status */ |
118 | break; | 118 | break; |
119 | case EXYNOS4_MCT_G_COMP0_L: | 119 | case EXYNOS4_MCT_G_COMP0_L: |
120 | stat_addr = EXYNOS4_MCT_G_WSTAT; | 120 | stat_addr = EXYNOS4_MCT_G_WSTAT; |
121 | mask = 1 << 0; /* G_COMP0_L write status */ | 121 | mask = 1 << 0; /* G_COMP0_L write status */ |
122 | break; | 122 | break; |
123 | case EXYNOS4_MCT_G_COMP0_U: | 123 | case EXYNOS4_MCT_G_COMP0_U: |
124 | stat_addr = EXYNOS4_MCT_G_WSTAT; | 124 | stat_addr = EXYNOS4_MCT_G_WSTAT; |
125 | mask = 1 << 1; /* G_COMP0_U write status */ | 125 | mask = 1 << 1; /* G_COMP0_U write status */ |
126 | break; | 126 | break; |
127 | case EXYNOS4_MCT_G_COMP0_ADD_INCR: | 127 | case EXYNOS4_MCT_G_COMP0_ADD_INCR: |
128 | stat_addr = EXYNOS4_MCT_G_WSTAT; | 128 | stat_addr = EXYNOS4_MCT_G_WSTAT; |
129 | mask = 1 << 2; /* G_COMP0_ADD_INCR w status */ | 129 | mask = 1 << 2; /* G_COMP0_ADD_INCR w status */ |
130 | break; | 130 | break; |
131 | case EXYNOS4_MCT_G_CNT_L: | 131 | case EXYNOS4_MCT_G_CNT_L: |
132 | stat_addr = EXYNOS4_MCT_G_CNT_WSTAT; | 132 | stat_addr = EXYNOS4_MCT_G_CNT_WSTAT; |
133 | mask = 1 << 0; /* G_CNT_L write status */ | 133 | mask = 1 << 0; /* G_CNT_L write status */ |
134 | break; | 134 | break; |
135 | case EXYNOS4_MCT_G_CNT_U: | 135 | case EXYNOS4_MCT_G_CNT_U: |
136 | stat_addr = EXYNOS4_MCT_G_CNT_WSTAT; | 136 | stat_addr = EXYNOS4_MCT_G_CNT_WSTAT; |
137 | mask = 1 << 1; /* G_CNT_U write status */ | 137 | mask = 1 << 1; /* G_CNT_U write status */ |
138 | break; | 138 | break; |
139 | default: | 139 | default: |
140 | return; | 140 | return; |
141 | } | 141 | } |
142 | } | 142 | } |
143 | 143 | ||
144 | /* Wait a maximum of 1 ms until the written values are applied */ | 144 | /* Wait a maximum of 1 ms until the written values are applied */ |
145 | for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++) | 145 | for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++) |
146 | if (__raw_readl(reg_base + stat_addr) & mask) { | 146 | if (__raw_readl(reg_base + stat_addr) & mask) { |
147 | __raw_writel(mask, reg_base + stat_addr); | 147 | __raw_writel(mask, reg_base + stat_addr); |
148 | return; | 148 | return; |
149 | } | 149 | } |
150 | 150 | ||
151 | panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset); | 151 | panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset); |
152 | } | 152 | } |
153 | 153 | ||
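
Every MCT register write above has to be confirmed through the matching write-status (WSTAT) bit, which the hardware sets once the value has propagated and which the driver acknowledges by writing the bit back. A minimal sketch of that write-then-poll pattern, with reg_write()/reg_read() as hypothetical stand-ins for the __raw_writel()/__raw_readl() accesses and an arbitrary retry bound:

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical register accessors standing in for the MMIO ops. */
    extern void reg_write(uint32_t val, unsigned long offset);
    extern uint32_t reg_read(unsigned long offset);

    static bool mct_write_acked(uint32_t value, unsigned long offset,
    			    unsigned long wstat, uint32_t mask)
    {
    	unsigned long retries = 1000000;	/* stand-in for the ~1 ms bound */

    	reg_write(value, offset);
    	while (retries--) {
    		if (reg_read(wstat) & mask) {
    			reg_write(mask, wstat);	/* ack: write-1-to-clear */
    			return true;
    		}
    	}
    	return false;	/* the driver above treats this as fatal */
    }
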
154 | /* Clocksource handling */ | 154 | /* Clocksource handling */ |
155 | static void exynos4_mct_frc_start(u32 hi, u32 lo) | 155 | static void exynos4_mct_frc_start(u32 hi, u32 lo) |
156 | { | 156 | { |
157 | u32 reg; | 157 | u32 reg; |
158 | 158 | ||
159 | exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L); | 159 | exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L); |
160 | exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U); | 160 | exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U); |
161 | 161 | ||
162 | reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); | 162 | reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); |
163 | reg |= MCT_G_TCON_START; | 163 | reg |= MCT_G_TCON_START; |
164 | exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); | 164 | exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); |
165 | } | 165 | } |
166 | 166 | ||
167 | static cycle_t exynos4_frc_read(struct clocksource *cs) | 167 | static cycle_t exynos4_frc_read(struct clocksource *cs) |
168 | { | 168 | { |
169 | unsigned int lo, hi; | 169 | unsigned int lo, hi; |
170 | u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); | 170 | u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); |
171 | 171 | ||
172 | do { | 172 | do { |
173 | hi = hi2; | 173 | hi = hi2; |
174 | lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L); | 174 | lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L); |
175 | hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); | 175 | hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); |
176 | } while (hi != hi2); | 176 | } while (hi != hi2); |
177 | 177 | ||
178 | return ((cycle_t)hi << 32) | lo; | 178 | return ((cycle_t)hi << 32) | lo; |
179 | } | 179 | } |
180 | 180 | ||
181 | static void exynos4_frc_resume(struct clocksource *cs) | 181 | static void exynos4_frc_resume(struct clocksource *cs) |
182 | { | 182 | { |
183 | exynos4_mct_frc_start(0, 0); | 183 | exynos4_mct_frc_start(0, 0); |
184 | } | 184 | } |
185 | 185 | ||
186 | struct clocksource mct_frc = { | 186 | struct clocksource mct_frc = { |
187 | .name = "mct-frc", | 187 | .name = "mct-frc", |
188 | .rating = 400, | 188 | .rating = 400, |
189 | .read = exynos4_frc_read, | 189 | .read = exynos4_frc_read, |
190 | .mask = CLOCKSOURCE_MASK(64), | 190 | .mask = CLOCKSOURCE_MASK(64), |
191 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 191 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
192 | .resume = exynos4_frc_resume, | 192 | .resume = exynos4_frc_resume, |
193 | }; | 193 | }; |
194 | 194 | ||
195 | static void __init exynos4_clocksource_init(void) | 195 | static void __init exynos4_clocksource_init(void) |
196 | { | 196 | { |
197 | exynos4_mct_frc_start(0, 0); | 197 | exynos4_mct_frc_start(0, 0); |
198 | 198 | ||
199 | if (clocksource_register_hz(&mct_frc, clk_rate)) | 199 | if (clocksource_register_hz(&mct_frc, clk_rate)) |
200 | panic("%s: can't register clocksource\n", mct_frc.name); | 200 | panic("%s: can't register clocksource\n", mct_frc.name); |
201 | } | 201 | } |
202 | 202 | ||
203 | static void exynos4_mct_comp0_stop(void) | 203 | static void exynos4_mct_comp0_stop(void) |
204 | { | 204 | { |
205 | unsigned int tcon; | 205 | unsigned int tcon; |
206 | 206 | ||
207 | tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); | 207 | tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); |
208 | tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC); | 208 | tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC); |
209 | 209 | ||
210 | exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON); | 210 | exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON); |
211 | exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB); | 211 | exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB); |
212 | } | 212 | } |
213 | 213 | ||
214 | static void exynos4_mct_comp0_start(enum clock_event_mode mode, | 214 | static void exynos4_mct_comp0_start(enum clock_event_mode mode, |
215 | unsigned long cycles) | 215 | unsigned long cycles) |
216 | { | 216 | { |
217 | unsigned int tcon; | 217 | unsigned int tcon; |
218 | cycle_t comp_cycle; | 218 | cycle_t comp_cycle; |
219 | 219 | ||
220 | tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); | 220 | tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); |
221 | 221 | ||
222 | if (mode == CLOCK_EVT_MODE_PERIODIC) { | 222 | if (mode == CLOCK_EVT_MODE_PERIODIC) { |
223 | tcon |= MCT_G_TCON_COMP0_AUTO_INC; | 223 | tcon |= MCT_G_TCON_COMP0_AUTO_INC; |
224 | exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR); | 224 | exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR); |
225 | } | 225 | } |
226 | 226 | ||
227 | comp_cycle = exynos4_frc_read(&mct_frc) + cycles; | 227 | comp_cycle = exynos4_frc_read(&mct_frc) + cycles; |
228 | exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L); | 228 | exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L); |
229 | exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U); | 229 | exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U); |
230 | 230 | ||
231 | exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB); | 231 | exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB); |
232 | 232 | ||
233 | tcon |= MCT_G_TCON_COMP0_ENABLE; | 233 | tcon |= MCT_G_TCON_COMP0_ENABLE; |
234 | exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON); | 234 | exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON); |
235 | } | 235 | } |
236 | 236 | ||
237 | static int exynos4_comp_set_next_event(unsigned long cycles, | 237 | static int exynos4_comp_set_next_event(unsigned long cycles, |
238 | struct clock_event_device *evt) | 238 | struct clock_event_device *evt) |
239 | { | 239 | { |
240 | exynos4_mct_comp0_start(evt->mode, cycles); | 240 | exynos4_mct_comp0_start(evt->mode, cycles); |
241 | 241 | ||
242 | return 0; | 242 | return 0; |
243 | } | 243 | } |
244 | 244 | ||
245 | static void exynos4_comp_set_mode(enum clock_event_mode mode, | 245 | static void exynos4_comp_set_mode(enum clock_event_mode mode, |
246 | struct clock_event_device *evt) | 246 | struct clock_event_device *evt) |
247 | { | 247 | { |
248 | unsigned long cycles_per_jiffy; | 248 | unsigned long cycles_per_jiffy; |
249 | exynos4_mct_comp0_stop(); | 249 | exynos4_mct_comp0_stop(); |
250 | 250 | ||
251 | switch (mode) { | 251 | switch (mode) { |
252 | case CLOCK_EVT_MODE_PERIODIC: | 252 | case CLOCK_EVT_MODE_PERIODIC: |
253 | cycles_per_jiffy = | 253 | cycles_per_jiffy = |
254 | (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift); | 254 | (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift); |
255 | exynos4_mct_comp0_start(mode, cycles_per_jiffy); | 255 | exynos4_mct_comp0_start(mode, cycles_per_jiffy); |
256 | break; | 256 | break; |
257 | 257 | ||
258 | case CLOCK_EVT_MODE_ONESHOT: | 258 | case CLOCK_EVT_MODE_ONESHOT: |
259 | case CLOCK_EVT_MODE_UNUSED: | 259 | case CLOCK_EVT_MODE_UNUSED: |
260 | case CLOCK_EVT_MODE_SHUTDOWN: | 260 | case CLOCK_EVT_MODE_SHUTDOWN: |
261 | case CLOCK_EVT_MODE_RESUME: | 261 | case CLOCK_EVT_MODE_RESUME: |
262 | break; | 262 | break; |
263 | } | 263 | } |
264 | } | 264 | } |
265 | 265 | ||
266 | static struct clock_event_device mct_comp_device = { | 266 | static struct clock_event_device mct_comp_device = { |
267 | .name = "mct-comp", | 267 | .name = "mct-comp", |
268 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | 268 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
269 | .rating = 250, | 269 | .rating = 250, |
270 | .set_next_event = exynos4_comp_set_next_event, | 270 | .set_next_event = exynos4_comp_set_next_event, |
271 | .set_mode = exynos4_comp_set_mode, | 271 | .set_mode = exynos4_comp_set_mode, |
272 | }; | 272 | }; |
273 | 273 | ||
274 | static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id) | 274 | static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id) |
275 | { | 275 | { |
276 | struct clock_event_device *evt = dev_id; | 276 | struct clock_event_device *evt = dev_id; |
277 | 277 | ||
278 | exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT); | 278 | exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT); |
279 | 279 | ||
280 | evt->event_handler(evt); | 280 | evt->event_handler(evt); |
281 | 281 | ||
282 | return IRQ_HANDLED; | 282 | return IRQ_HANDLED; |
283 | } | 283 | } |
284 | 284 | ||
285 | static struct irqaction mct_comp_event_irq = { | 285 | static struct irqaction mct_comp_event_irq = { |
286 | .name = "mct_comp_irq", | 286 | .name = "mct_comp_irq", |
287 | .flags = IRQF_TIMER | IRQF_IRQPOLL, | 287 | .flags = IRQF_TIMER | IRQF_IRQPOLL, |
288 | .handler = exynos4_mct_comp_isr, | 288 | .handler = exynos4_mct_comp_isr, |
289 | .dev_id = &mct_comp_device, | 289 | .dev_id = &mct_comp_device, |
290 | }; | 290 | }; |
291 | 291 | ||
292 | static void exynos4_clockevent_init(void) | 292 | static void exynos4_clockevent_init(void) |
293 | { | 293 | { |
294 | mct_comp_device.cpumask = cpumask_of(0); | 294 | mct_comp_device.cpumask = cpumask_of(0); |
295 | clockevents_config_and_register(&mct_comp_device, clk_rate, | 295 | clockevents_config_and_register(&mct_comp_device, clk_rate, |
296 | 0xf, 0xffffffff); | 296 | 0xf, 0xffffffff); |
297 | setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq); | 297 | setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq); |
298 | } | 298 | } |
299 | 299 | ||
300 | static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick); | 300 | static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick); |
301 | 301 | ||
302 | /* Clock event handling */ | 302 | /* Clock event handling */ |
303 | static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt) | 303 | static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt) |
304 | { | 304 | { |
305 | unsigned long tmp; | 305 | unsigned long tmp; |
306 | unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START; | 306 | unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START; |
307 | unsigned long offset = mevt->base + MCT_L_TCON_OFFSET; | 307 | unsigned long offset = mevt->base + MCT_L_TCON_OFFSET; |
308 | 308 | ||
309 | tmp = __raw_readl(reg_base + offset); | 309 | tmp = __raw_readl(reg_base + offset); |
310 | if (tmp & mask) { | 310 | if (tmp & mask) { |
311 | tmp &= ~mask; | 311 | tmp &= ~mask; |
312 | exynos4_mct_write(tmp, offset); | 312 | exynos4_mct_write(tmp, offset); |
313 | } | 313 | } |
314 | } | 314 | } |
315 | 315 | ||
316 | static void exynos4_mct_tick_start(unsigned long cycles, | 316 | static void exynos4_mct_tick_start(unsigned long cycles, |
317 | struct mct_clock_event_device *mevt) | 317 | struct mct_clock_event_device *mevt) |
318 | { | 318 | { |
319 | unsigned long tmp; | 319 | unsigned long tmp; |
320 | 320 | ||
321 | exynos4_mct_tick_stop(mevt); | 321 | exynos4_mct_tick_stop(mevt); |
322 | 322 | ||
323 | tmp = (1 << 31) | cycles; /* MCT_L_UPDATE_ICNTB */ | 323 | tmp = (1 << 31) | cycles; /* MCT_L_UPDATE_ICNTB */ |
324 | 324 | ||
325 | /* update interrupt count buffer */ | 325 | /* update interrupt count buffer */ |
326 | exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET); | 326 | exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET); |
327 | 327 | ||
328 | /* enable MCT tick interrupt */ | 328 | /* enable MCT tick interrupt */ |
329 | exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET); | 329 | exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET); |
330 | 330 | ||
331 | tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET); | 331 | tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET); |
332 | tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START | | 332 | tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START | |
333 | MCT_L_TCON_INTERVAL_MODE; | 333 | MCT_L_TCON_INTERVAL_MODE; |
334 | exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET); | 334 | exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET); |
335 | } | 335 | } |
336 | 336 | ||
337 | static int exynos4_tick_set_next_event(unsigned long cycles, | 337 | static int exynos4_tick_set_next_event(unsigned long cycles, |
338 | struct clock_event_device *evt) | 338 | struct clock_event_device *evt) |
339 | { | 339 | { |
340 | struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); | 340 | struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); |
341 | 341 | ||
342 | exynos4_mct_tick_start(cycles, mevt); | 342 | exynos4_mct_tick_start(cycles, mevt); |
343 | 343 | ||
344 | return 0; | 344 | return 0; |
345 | } | 345 | } |
346 | 346 | ||
347 | static inline void exynos4_tick_set_mode(enum clock_event_mode mode, | 347 | static inline void exynos4_tick_set_mode(enum clock_event_mode mode, |
348 | struct clock_event_device *evt) | 348 | struct clock_event_device *evt) |
349 | { | 349 | { |
350 | struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); | 350 | struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); |
351 | unsigned long cycles_per_jiffy; | 351 | unsigned long cycles_per_jiffy; |
352 | 352 | ||
353 | exynos4_mct_tick_stop(mevt); | 353 | exynos4_mct_tick_stop(mevt); |
354 | 354 | ||
355 | switch (mode) { | 355 | switch (mode) { |
356 | case CLOCK_EVT_MODE_PERIODIC: | 356 | case CLOCK_EVT_MODE_PERIODIC: |
357 | cycles_per_jiffy = | 357 | cycles_per_jiffy = |
358 | (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift); | 358 | (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift); |
359 | exynos4_mct_tick_start(cycles_per_jiffy, mevt); | 359 | exynos4_mct_tick_start(cycles_per_jiffy, mevt); |
360 | break; | 360 | break; |
361 | 361 | ||
362 | case CLOCK_EVT_MODE_ONESHOT: | 362 | case CLOCK_EVT_MODE_ONESHOT: |
363 | case CLOCK_EVT_MODE_UNUSED: | 363 | case CLOCK_EVT_MODE_UNUSED: |
364 | case CLOCK_EVT_MODE_SHUTDOWN: | 364 | case CLOCK_EVT_MODE_SHUTDOWN: |
365 | case CLOCK_EVT_MODE_RESUME: | 365 | case CLOCK_EVT_MODE_RESUME: |
366 | break; | 366 | break; |
367 | } | 367 | } |
368 | } | 368 | } |
369 | 369 | ||
370 | static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) | 370 | static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) |
371 | { | 371 | { |
372 | struct clock_event_device *evt = &mevt->evt; | 372 | struct clock_event_device *evt = &mevt->evt; |
373 | 373 | ||
374 | /* | 374 | /* |
375 | * This is for supporting oneshot mode. | 375 | * This is for supporting oneshot mode. |
376 | * The MCT would keep generating interrupts periodically | 376 | * The MCT would keep generating interrupts periodically |
377 | * unless it is explicitly stopped. | 377 | * unless it is explicitly stopped. |
378 | */ | 378 | */ |
379 | if (evt->mode != CLOCK_EVT_MODE_PERIODIC) | 379 | if (evt->mode != CLOCK_EVT_MODE_PERIODIC) |
380 | exynos4_mct_tick_stop(mevt); | 380 | exynos4_mct_tick_stop(mevt); |
381 | 381 | ||
382 | /* Clear the MCT tick interrupt */ | 382 | /* Clear the MCT tick interrupt */ |
383 | if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) { | 383 | if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) { |
384 | exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); | 384 | exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); |
385 | return 1; | 385 | return 1; |
386 | } else { | 386 | } else { |
387 | return 0; | 387 | return 0; |
388 | } | 388 | } |
389 | } | 389 | } |
390 | 390 | ||
391 | static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) | 391 | static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) |
392 | { | 392 | { |
393 | struct mct_clock_event_device *mevt = dev_id; | 393 | struct mct_clock_event_device *mevt = dev_id; |
394 | struct clock_event_device *evt = &mevt->evt; | 394 | struct clock_event_device *evt = &mevt->evt; |
395 | 395 | ||
396 | exynos4_mct_tick_clear(mevt); | 396 | exynos4_mct_tick_clear(mevt); |
397 | 397 | ||
398 | evt->event_handler(evt); | 398 | evt->event_handler(evt); |
399 | 399 | ||
400 | return IRQ_HANDLED; | 400 | return IRQ_HANDLED; |
401 | } | 401 | } |
402 | 402 | ||
403 | static int exynos4_local_timer_setup(struct clock_event_device *evt) | 403 | static int exynos4_local_timer_setup(struct clock_event_device *evt) |
404 | { | 404 | { |
405 | struct mct_clock_event_device *mevt; | 405 | struct mct_clock_event_device *mevt; |
406 | unsigned int cpu = smp_processor_id(); | 406 | unsigned int cpu = smp_processor_id(); |
407 | 407 | ||
408 | mevt = container_of(evt, struct mct_clock_event_device, evt); | 408 | mevt = container_of(evt, struct mct_clock_event_device, evt); |
409 | 409 | ||
410 | mevt->base = EXYNOS4_MCT_L_BASE(cpu); | 410 | mevt->base = EXYNOS4_MCT_L_BASE(cpu); |
411 | snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu); | 411 | snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu); |
412 | 412 | ||
413 | evt->name = mevt->name; | 413 | evt->name = mevt->name; |
414 | evt->cpumask = cpumask_of(cpu); | 414 | evt->cpumask = cpumask_of(cpu); |
415 | evt->set_next_event = exynos4_tick_set_next_event; | 415 | evt->set_next_event = exynos4_tick_set_next_event; |
416 | evt->set_mode = exynos4_tick_set_mode; | 416 | evt->set_mode = exynos4_tick_set_mode; |
417 | evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; | 417 | evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; |
418 | evt->rating = 450; | 418 | evt->rating = 450; |
419 | clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1), | ||
420 | 0xf, 0x7fffffff); | ||
421 | 419 | ||
422 | exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); | 420 | exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); |
423 | 421 | ||
424 | if (mct_int_type == MCT_INT_SPI) { | 422 | if (mct_int_type == MCT_INT_SPI) { |
425 | evt->irq = mct_irqs[MCT_L0_IRQ + cpu]; | 423 | evt->irq = mct_irqs[MCT_L0_IRQ + cpu]; |
426 | if (request_irq(evt->irq, exynos4_mct_tick_isr, | 424 | if (request_irq(evt->irq, exynos4_mct_tick_isr, |
427 | IRQF_TIMER | IRQF_NOBALANCING, | 425 | IRQF_TIMER | IRQF_NOBALANCING, |
428 | evt->name, mevt)) { | 426 | evt->name, mevt)) { |
429 | pr_err("exynos-mct: cannot register IRQ %d\n", | 427 | pr_err("exynos-mct: cannot register IRQ %d\n", |
430 | evt->irq); | 428 | evt->irq); |
431 | return -EIO; | 429 | return -EIO; |
432 | } | 430 | } |
431 | irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu)); | ||
433 | } else { | 432 | } else { |
434 | enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); | 433 | enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); |
435 | } | 434 | } |
435 | clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1), | ||
436 | 0xf, 0x7fffffff); | ||
436 | 437 | ||
437 | return 0; | 438 | return 0; |
438 | } | 439 | } |
439 | 440 | ||
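
Two of this commit's changes meet in the hunk above: clockevents_config_and_register() moves from the top of exynos4_local_timer_setup() to after the interrupt is requested, since registration may immediately program a first event whose interrupt must already be wired up, and irq_force_affinity() replaces a deferred irq_set_affinity() because this path runs while the CPU is still being brought up and is not yet online. A condensed sketch of the resulting SPI-case order (error handling trimmed, names as in the driver):

    evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
    if (request_irq(evt->irq, exynos4_mct_tick_isr,
    		IRQF_TIMER | IRQF_NOBALANCING, evt->name, mevt))
    	return -EIO;

    /* The CPU is not online yet, so a plain irq_set_affinity() would be
     * rejected; force the affinity to the CPU being brought up. */
    irq_force_affinity(evt->irq, cpumask_of(cpu));

    /* Only now expose the clockevent; the first programmed event will
     * already fire on the right CPU. */
    clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
    				0xf, 0x7fffffff);
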
440 | static void exynos4_local_timer_stop(struct clock_event_device *evt) | 441 | static void exynos4_local_timer_stop(struct clock_event_device *evt) |
441 | { | 442 | { |
442 | evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); | 443 | evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); |
443 | if (mct_int_type == MCT_INT_SPI) | 444 | if (mct_int_type == MCT_INT_SPI) |
444 | free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick)); | 445 | free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick)); |
445 | else | 446 | else |
446 | disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); | 447 | disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); |
447 | } | 448 | } |
448 | 449 | ||
449 | static int exynos4_mct_cpu_notify(struct notifier_block *self, | 450 | static int exynos4_mct_cpu_notify(struct notifier_block *self, |
450 | unsigned long action, void *hcpu) | 451 | unsigned long action, void *hcpu) |
451 | { | 452 | { |
452 | struct mct_clock_event_device *mevt; | 453 | struct mct_clock_event_device *mevt; |
453 | unsigned int cpu; | ||
454 | 454 | ||
455 | /* | 455 | /* |
456 | * Grab cpu pointer in each case to avoid spurious | 456 | * Grab cpu pointer in each case to avoid spurious |
457 | * preemptible warnings | 457 | * preemptible warnings |
458 | */ | 458 | */ |
459 | switch (action & ~CPU_TASKS_FROZEN) { | 459 | switch (action & ~CPU_TASKS_FROZEN) { |
460 | case CPU_STARTING: | 460 | case CPU_STARTING: |
461 | mevt = this_cpu_ptr(&percpu_mct_tick); | 461 | mevt = this_cpu_ptr(&percpu_mct_tick); |
462 | exynos4_local_timer_setup(&mevt->evt); | 462 | exynos4_local_timer_setup(&mevt->evt); |
463 | break; | ||
464 | case CPU_ONLINE: | ||
465 | cpu = (unsigned long)hcpu; | ||
466 | if (mct_int_type == MCT_INT_SPI) | ||
467 | irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu], | ||
468 | cpumask_of(cpu)); | ||
469 | break; | 463 | break; |
470 | case CPU_DYING: | 464 | case CPU_DYING: |
471 | mevt = this_cpu_ptr(&percpu_mct_tick); | 465 | mevt = this_cpu_ptr(&percpu_mct_tick); |
472 | exynos4_local_timer_stop(&mevt->evt); | 466 | exynos4_local_timer_stop(&mevt->evt); |
473 | break; | 467 | break; |
474 | } | 468 | } |
475 | 469 | ||
476 | return NOTIFY_OK; | 470 | return NOTIFY_OK; |
477 | } | 471 | } |
478 | 472 | ||
479 | static struct notifier_block exynos4_mct_cpu_nb = { | 473 | static struct notifier_block exynos4_mct_cpu_nb = { |
480 | .notifier_call = exynos4_mct_cpu_notify, | 474 | .notifier_call = exynos4_mct_cpu_notify, |
481 | }; | 475 | }; |
482 | 476 | ||
483 | static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) | 477 | static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) |
484 | { | 478 | { |
485 | int err; | 479 | int err; |
486 | struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); | 480 | struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); |
487 | struct clk *mct_clk, *tick_clk; | 481 | struct clk *mct_clk, *tick_clk; |
488 | 482 | ||
489 | tick_clk = np ? of_clk_get_by_name(np, "fin_pll") : | 483 | tick_clk = np ? of_clk_get_by_name(np, "fin_pll") : |
490 | clk_get(NULL, "fin_pll"); | 484 | clk_get(NULL, "fin_pll"); |
491 | if (IS_ERR(tick_clk)) | 485 | if (IS_ERR(tick_clk)) |
492 | panic("%s: unable to determine tick clock rate\n", __func__); | 486 | panic("%s: unable to determine tick clock rate\n", __func__); |
493 | clk_rate = clk_get_rate(tick_clk); | 487 | clk_rate = clk_get_rate(tick_clk); |
494 | 488 | ||
495 | mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct"); | 489 | mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct"); |
496 | if (IS_ERR(mct_clk)) | 490 | if (IS_ERR(mct_clk)) |
497 | panic("%s: unable to retrieve mct clock instance\n", __func__); | 491 | panic("%s: unable to retrieve mct clock instance\n", __func__); |
498 | clk_prepare_enable(mct_clk); | 492 | clk_prepare_enable(mct_clk); |
499 | 493 | ||
500 | reg_base = base; | 494 | reg_base = base; |
501 | if (!reg_base) | 495 | if (!reg_base) |
502 | panic("%s: unable to ioremap mct address space\n", __func__); | 496 | panic("%s: unable to ioremap mct address space\n", __func__); |
503 | 497 | ||
504 | if (mct_int_type == MCT_INT_PPI) { | 498 | if (mct_int_type == MCT_INT_PPI) { |
505 | 499 | ||
506 | err = request_percpu_irq(mct_irqs[MCT_L0_IRQ], | 500 | err = request_percpu_irq(mct_irqs[MCT_L0_IRQ], |
507 | exynos4_mct_tick_isr, "MCT", | 501 | exynos4_mct_tick_isr, "MCT", |
508 | &percpu_mct_tick); | 502 | &percpu_mct_tick); |
509 | WARN(err, "MCT: can't request IRQ %d (%d)\n", | 503 | WARN(err, "MCT: can't request IRQ %d (%d)\n", |
510 | mct_irqs[MCT_L0_IRQ], err); | 504 | mct_irqs[MCT_L0_IRQ], err); |
511 | } else { | 505 | } else { |
512 | irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0)); | 506 | irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0)); |
513 | } | 507 | } |
514 | 508 | ||
515 | err = register_cpu_notifier(&exynos4_mct_cpu_nb); | 509 | err = register_cpu_notifier(&exynos4_mct_cpu_nb); |
516 | if (err) | 510 | if (err) |
517 | goto out_irq; | 511 | goto out_irq; |
518 | 512 | ||
519 | /* Immediately configure the timer on the boot CPU */ | 513 | /* Immediately configure the timer on the boot CPU */ |
520 | exynos4_local_timer_setup(&mevt->evt); | 514 | exynos4_local_timer_setup(&mevt->evt); |
521 | return; | 515 | return; |
522 | 516 | ||
523 | out_irq: | 517 | out_irq: |
524 | free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); | 518 | free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); |
525 | } | 519 | } |
526 | 520 | ||
527 | void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1) | 521 | void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1) |
528 | { | 522 | { |
529 | mct_irqs[MCT_G0_IRQ] = irq_g0; | 523 | mct_irqs[MCT_G0_IRQ] = irq_g0; |
530 | mct_irqs[MCT_L0_IRQ] = irq_l0; | 524 | mct_irqs[MCT_L0_IRQ] = irq_l0; |
531 | mct_irqs[MCT_L1_IRQ] = irq_l1; | 525 | mct_irqs[MCT_L1_IRQ] = irq_l1; |
532 | mct_int_type = MCT_INT_SPI; | 526 | mct_int_type = MCT_INT_SPI; |
533 | 527 | ||
534 | exynos4_timer_resources(NULL, base); | 528 | exynos4_timer_resources(NULL, base); |
535 | exynos4_clocksource_init(); | 529 | exynos4_clocksource_init(); |
536 | exynos4_clockevent_init(); | 530 | exynos4_clockevent_init(); |
537 | } | 531 | } |
538 | 532 | ||
539 | static void __init mct_init_dt(struct device_node *np, unsigned int int_type) | 533 | static void __init mct_init_dt(struct device_node *np, unsigned int int_type) |
540 | { | 534 | { |
541 | u32 nr_irqs, i; | 535 | u32 nr_irqs, i; |
542 | 536 | ||
543 | mct_int_type = int_type; | 537 | mct_int_type = int_type; |
544 | 538 | ||
545 | /* This driver uses only one global timer interrupt */ | 539 | /* This driver uses only one global timer interrupt */ |
546 | mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); | 540 | mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); |
547 | 541 | ||
548 | /* | 542 | /* |
549 | * Find out the number of local irqs specified. The local | 543 | * Find out the number of local irqs specified. The local |
550 | * timer irqs are listed after the four | 544 | * timer irqs are listed after the four |
551 | * global timer irqs. | 545 | * global timer irqs. |
552 | */ | 546 | */ |
553 | #ifdef CONFIG_OF | 547 | #ifdef CONFIG_OF |
554 | nr_irqs = of_irq_count(np); | 548 | nr_irqs = of_irq_count(np); |
555 | #else | 549 | #else |
556 | nr_irqs = 0; | 550 | nr_irqs = 0; |
557 | #endif | 551 | #endif |
558 | for (i = MCT_L0_IRQ; i < nr_irqs; i++) | 552 | for (i = MCT_L0_IRQ; i < nr_irqs; i++) |
559 | mct_irqs[i] = irq_of_parse_and_map(np, i); | 553 | mct_irqs[i] = irq_of_parse_and_map(np, i); |
560 | 554 | ||
561 | exynos4_timer_resources(np, of_iomap(np, 0)); | 555 | exynos4_timer_resources(np, of_iomap(np, 0)); |
562 | exynos4_clocksource_init(); | 556 | exynos4_clocksource_init(); |
563 | exynos4_clockevent_init(); | 557 | exynos4_clockevent_init(); |
564 | } | 558 | } |
565 | 559 | ||
566 | 560 | ||
567 | static void __init mct_init_spi(struct device_node *np) | 561 | static void __init mct_init_spi(struct device_node *np) |
568 | { | 562 | { |
569 | return mct_init_dt(np, MCT_INT_SPI); | 563 | return mct_init_dt(np, MCT_INT_SPI); |
570 | } | 564 | } |
571 | 565 | ||
572 | static void __init mct_init_ppi(struct device_node *np) | 566 | static void __init mct_init_ppi(struct device_node *np) |
573 | { | 567 | { |
574 | return mct_init_dt(np, MCT_INT_PPI); | 568 | return mct_init_dt(np, MCT_INT_PPI); |
575 | } | 569 | } |
drivers/irqchip/irq-gic.c
1 | /* | 1 | /* |
2 | * linux/arch/arm/common/gic.c | 2 | * linux/arch/arm/common/gic.c |
3 | * | 3 | * |
4 | * Copyright (C) 2002 ARM Limited, All Rights Reserved. | 4 | * Copyright (C) 2002 ARM Limited, All Rights Reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | * | 9 | * |
10 | * Interrupt architecture for the GIC: | 10 | * Interrupt architecture for the GIC: |
11 | * | 11 | * |
12 | * o There is one Interrupt Distributor, which receives interrupts | 12 | * o There is one Interrupt Distributor, which receives interrupts |
13 | * from system devices and sends them to the Interrupt Controllers. | 13 | * from system devices and sends them to the Interrupt Controllers. |
14 | * | 14 | * |
15 | * o There is one CPU Interface per CPU, which forwards interrupts sent | 15 | * o There is one CPU Interface per CPU, which forwards interrupts sent |
16 | * by the Distributor, and interrupts generated locally, to the | 16 | * by the Distributor, and interrupts generated locally, to the |
17 | * associated CPU. The base address of the CPU interface is usually | 17 | * associated CPU. The base address of the CPU interface is usually |
18 | * aliased so that the same address points to different chips depending | 18 | * aliased so that the same address points to different chips depending |
19 | * on the CPU it is accessed from. | 19 | * on the CPU it is accessed from. |
20 | * | 20 | * |
21 | * Note that IRQs 0-31 are special - they are local to each CPU. | 21 | * Note that IRQs 0-31 are special - they are local to each CPU. |
22 | * As such, the enable set/clear, pending set/clear and active bit | 22 | * As such, the enable set/clear, pending set/clear and active bit |
23 | * registers are banked per-cpu for these sources. | 23 | * registers are banked per-cpu for these sources. |
24 | */ | 24 | */ |
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/err.h> | 27 | #include <linux/err.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/list.h> | 29 | #include <linux/list.h> |
30 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
31 | #include <linux/cpu.h> | 31 | #include <linux/cpu.h> |
32 | #include <linux/cpu_pm.h> | 32 | #include <linux/cpu_pm.h> |
33 | #include <linux/cpumask.h> | 33 | #include <linux/cpumask.h> |
34 | #include <linux/io.h> | 34 | #include <linux/io.h> |
35 | #include <linux/of.h> | 35 | #include <linux/of.h> |
36 | #include <linux/of_address.h> | 36 | #include <linux/of_address.h> |
37 | #include <linux/of_irq.h> | 37 | #include <linux/of_irq.h> |
38 | #include <linux/irqdomain.h> | 38 | #include <linux/irqdomain.h> |
39 | #include <linux/interrupt.h> | 39 | #include <linux/interrupt.h> |
40 | #include <linux/percpu.h> | 40 | #include <linux/percpu.h> |
41 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
42 | #include <linux/irqchip/chained_irq.h> | 42 | #include <linux/irqchip/chained_irq.h> |
43 | #include <linux/irqchip/arm-gic.h> | 43 | #include <linux/irqchip/arm-gic.h> |
44 | 44 | ||
45 | #include <asm/irq.h> | 45 | #include <asm/irq.h> |
46 | #include <asm/exception.h> | 46 | #include <asm/exception.h> |
47 | #include <asm/smp_plat.h> | 47 | #include <asm/smp_plat.h> |
48 | 48 | ||
49 | #include "irqchip.h" | 49 | #include "irqchip.h" |
50 | 50 | ||
51 | union gic_base { | 51 | union gic_base { |
52 | void __iomem *common_base; | 52 | void __iomem *common_base; |
53 | void __percpu * __iomem *percpu_base; | 53 | void __percpu * __iomem *percpu_base; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct gic_chip_data { | 56 | struct gic_chip_data { |
57 | union gic_base dist_base; | 57 | union gic_base dist_base; |
58 | union gic_base cpu_base; | 58 | union gic_base cpu_base; |
59 | #ifdef CONFIG_CPU_PM | 59 | #ifdef CONFIG_CPU_PM |
60 | u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; | 60 | u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; |
61 | u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; | 61 | u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; |
62 | u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; | 62 | u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; |
63 | u32 __percpu *saved_ppi_enable; | 63 | u32 __percpu *saved_ppi_enable; |
64 | u32 __percpu *saved_ppi_conf; | 64 | u32 __percpu *saved_ppi_conf; |
65 | #endif | 65 | #endif |
66 | struct irq_domain *domain; | 66 | struct irq_domain *domain; |
67 | unsigned int gic_irqs; | 67 | unsigned int gic_irqs; |
68 | #ifdef CONFIG_GIC_NON_BANKED | 68 | #ifdef CONFIG_GIC_NON_BANKED |
69 | void __iomem *(*get_base)(union gic_base *); | 69 | void __iomem *(*get_base)(union gic_base *); |
70 | #endif | 70 | #endif |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); | 73 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * The GIC mapping of CPU interfaces does not necessarily match | 76 | * The GIC mapping of CPU interfaces does not necessarily match |
77 | * the logical CPU numbering. Let's use a mapping as returned | 77 | * the logical CPU numbering. Let's use a mapping as returned |
78 | * by the GIC itself. | 78 | * by the GIC itself. |
79 | */ | 79 | */ |
80 | #define NR_GIC_CPU_IF 8 | 80 | #define NR_GIC_CPU_IF 8 |
81 | static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly; | 81 | static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly; |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * Supported arch-specific GIC irq extension. | 84 | * Supported arch-specific GIC irq extension. |
85 | * By default all of its hooks are NULL. | 85 | * By default all of its hooks are NULL. |
86 | */ | 86 | */ |
87 | struct irq_chip gic_arch_extn = { | 87 | struct irq_chip gic_arch_extn = { |
88 | .irq_eoi = NULL, | 88 | .irq_eoi = NULL, |
89 | .irq_mask = NULL, | 89 | .irq_mask = NULL, |
90 | .irq_unmask = NULL, | 90 | .irq_unmask = NULL, |
91 | .irq_retrigger = NULL, | 91 | .irq_retrigger = NULL, |
92 | .irq_set_type = NULL, | 92 | .irq_set_type = NULL, |
93 | .irq_set_wake = NULL, | 93 | .irq_set_wake = NULL, |
94 | }; | 94 | }; |
95 | 95 | ||
96 | #ifndef MAX_GIC_NR | 96 | #ifndef MAX_GIC_NR |
97 | #define MAX_GIC_NR 1 | 97 | #define MAX_GIC_NR 1 |
98 | #endif | 98 | #endif |
99 | 99 | ||
100 | static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly; | 100 | static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly; |
101 | 101 | ||
102 | #ifdef CONFIG_GIC_NON_BANKED | 102 | #ifdef CONFIG_GIC_NON_BANKED |
103 | static void __iomem *gic_get_percpu_base(union gic_base *base) | 103 | static void __iomem *gic_get_percpu_base(union gic_base *base) |
104 | { | 104 | { |
105 | return *__this_cpu_ptr(base->percpu_base); | 105 | return *__this_cpu_ptr(base->percpu_base); |
106 | } | 106 | } |
107 | 107 | ||
108 | static void __iomem *gic_get_common_base(union gic_base *base) | 108 | static void __iomem *gic_get_common_base(union gic_base *base) |
109 | { | 109 | { |
110 | return base->common_base; | 110 | return base->common_base; |
111 | } | 111 | } |
112 | 112 | ||
113 | static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data) | 113 | static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data) |
114 | { | 114 | { |
115 | return data->get_base(&data->dist_base); | 115 | return data->get_base(&data->dist_base); |
116 | } | 116 | } |
117 | 117 | ||
118 | static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data) | 118 | static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data) |
119 | { | 119 | { |
120 | return data->get_base(&data->cpu_base); | 120 | return data->get_base(&data->cpu_base); |
121 | } | 121 | } |
122 | 122 | ||
123 | static inline void gic_set_base_accessor(struct gic_chip_data *data, | 123 | static inline void gic_set_base_accessor(struct gic_chip_data *data, |
124 | void __iomem *(*f)(union gic_base *)) | 124 | void __iomem *(*f)(union gic_base *)) |
125 | { | 125 | { |
126 | data->get_base = f; | 126 | data->get_base = f; |
127 | } | 127 | } |
128 | #else | 128 | #else |
129 | #define gic_data_dist_base(d) ((d)->dist_base.common_base) | 129 | #define gic_data_dist_base(d) ((d)->dist_base.common_base) |
130 | #define gic_data_cpu_base(d) ((d)->cpu_base.common_base) | 130 | #define gic_data_cpu_base(d) ((d)->cpu_base.common_base) |
131 | #define gic_set_base_accessor(d, f) | 131 | #define gic_set_base_accessor(d, f) |
132 | #endif | 132 | #endif |
133 | 133 | ||
134 | static inline void __iomem *gic_dist_base(struct irq_data *d) | 134 | static inline void __iomem *gic_dist_base(struct irq_data *d) |
135 | { | 135 | { |
136 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); | 136 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); |
137 | return gic_data_dist_base(gic_data); | 137 | return gic_data_dist_base(gic_data); |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline void __iomem *gic_cpu_base(struct irq_data *d) | 140 | static inline void __iomem *gic_cpu_base(struct irq_data *d) |
141 | { | 141 | { |
142 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); | 142 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); |
143 | return gic_data_cpu_base(gic_data); | 143 | return gic_data_cpu_base(gic_data); |
144 | } | 144 | } |
145 | 145 | ||
146 | static inline unsigned int gic_irq(struct irq_data *d) | 146 | static inline unsigned int gic_irq(struct irq_data *d) |
147 | { | 147 | { |
148 | return d->hwirq; | 148 | return d->hwirq; |
149 | } | 149 | } |
150 | 150 | ||
151 | /* | 151 | /* |
152 | * Routines to acknowledge, disable and enable interrupts | 152 | * Routines to acknowledge, disable and enable interrupts |
153 | */ | 153 | */ |
154 | static void gic_mask_irq(struct irq_data *d) | 154 | static void gic_mask_irq(struct irq_data *d) |
155 | { | 155 | { |
156 | u32 mask = 1 << (gic_irq(d) % 32); | 156 | u32 mask = 1 << (gic_irq(d) % 32); |
157 | 157 | ||
158 | raw_spin_lock(&irq_controller_lock); | 158 | raw_spin_lock(&irq_controller_lock); |
159 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); | 159 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); |
160 | if (gic_arch_extn.irq_mask) | 160 | if (gic_arch_extn.irq_mask) |
161 | gic_arch_extn.irq_mask(d); | 161 | gic_arch_extn.irq_mask(d); |
162 | raw_spin_unlock(&irq_controller_lock); | 162 | raw_spin_unlock(&irq_controller_lock); |
163 | } | 163 | } |
164 | 164 | ||
165 | static void gic_unmask_irq(struct irq_data *d) | 165 | static void gic_unmask_irq(struct irq_data *d) |
166 | { | 166 | { |
167 | u32 mask = 1 << (gic_irq(d) % 32); | 167 | u32 mask = 1 << (gic_irq(d) % 32); |
168 | 168 | ||
169 | raw_spin_lock(&irq_controller_lock); | 169 | raw_spin_lock(&irq_controller_lock); |
170 | if (gic_arch_extn.irq_unmask) | 170 | if (gic_arch_extn.irq_unmask) |
171 | gic_arch_extn.irq_unmask(d); | 171 | gic_arch_extn.irq_unmask(d); |
172 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); | 172 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); |
173 | raw_spin_unlock(&irq_controller_lock); | 173 | raw_spin_unlock(&irq_controller_lock); |
174 | } | 174 | } |
175 | 175 | ||
176 | static void gic_eoi_irq(struct irq_data *d) | 176 | static void gic_eoi_irq(struct irq_data *d) |
177 | { | 177 | { |
178 | if (gic_arch_extn.irq_eoi) { | 178 | if (gic_arch_extn.irq_eoi) { |
179 | raw_spin_lock(&irq_controller_lock); | 179 | raw_spin_lock(&irq_controller_lock); |
180 | gic_arch_extn.irq_eoi(d); | 180 | gic_arch_extn.irq_eoi(d); |
181 | raw_spin_unlock(&irq_controller_lock); | 181 | raw_spin_unlock(&irq_controller_lock); |
182 | } | 182 | } |
183 | 183 | ||
184 | writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); | 184 | writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); |
185 | } | 185 | } |
186 | 186 | ||
187 | static int gic_set_type(struct irq_data *d, unsigned int type) | 187 | static int gic_set_type(struct irq_data *d, unsigned int type) |
188 | { | 188 | { |
189 | void __iomem *base = gic_dist_base(d); | 189 | void __iomem *base = gic_dist_base(d); |
190 | unsigned int gicirq = gic_irq(d); | 190 | unsigned int gicirq = gic_irq(d); |
191 | u32 enablemask = 1 << (gicirq % 32); | 191 | u32 enablemask = 1 << (gicirq % 32); |
192 | u32 enableoff = (gicirq / 32) * 4; | 192 | u32 enableoff = (gicirq / 32) * 4; |
193 | u32 confmask = 0x2 << ((gicirq % 16) * 2); | 193 | u32 confmask = 0x2 << ((gicirq % 16) * 2); |
194 | u32 confoff = (gicirq / 16) * 4; | 194 | u32 confoff = (gicirq / 16) * 4; |
195 | bool enabled = false; | 195 | bool enabled = false; |
196 | u32 val; | 196 | u32 val; |
197 | 197 | ||
198 | /* Interrupt configuration for SGIs can't be changed */ | 198 | /* Interrupt configuration for SGIs can't be changed */ |
199 | if (gicirq < 16) | 199 | if (gicirq < 16) |
200 | return -EINVAL; | 200 | return -EINVAL; |
201 | 201 | ||
202 | if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) | 202 | if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) |
203 | return -EINVAL; | 203 | return -EINVAL; |
204 | 204 | ||
205 | raw_spin_lock(&irq_controller_lock); | 205 | raw_spin_lock(&irq_controller_lock); |
206 | 206 | ||
207 | if (gic_arch_extn.irq_set_type) | 207 | if (gic_arch_extn.irq_set_type) |
208 | gic_arch_extn.irq_set_type(d, type); | 208 | gic_arch_extn.irq_set_type(d, type); |
209 | 209 | ||
210 | val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); | 210 | val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); |
211 | if (type == IRQ_TYPE_LEVEL_HIGH) | 211 | if (type == IRQ_TYPE_LEVEL_HIGH) |
212 | val &= ~confmask; | 212 | val &= ~confmask; |
213 | else if (type == IRQ_TYPE_EDGE_RISING) | 213 | else if (type == IRQ_TYPE_EDGE_RISING) |
214 | val |= confmask; | 214 | val |= confmask; |
215 | 215 | ||
216 | /* | 216 | /* |
217 | * As recommended by the spec, disable the interrupt before changing | 217 | * As recommended by the spec, disable the interrupt before changing |
218 | * the configuration | 218 | * the configuration |
219 | */ | 219 | */ |
220 | if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { | 220 | if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { |
221 | writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); | 221 | writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); |
222 | enabled = true; | 222 | enabled = true; |
223 | } | 223 | } |
224 | 224 | ||
225 | writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); | 225 | writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); |
226 | 226 | ||
227 | if (enabled) | 227 | if (enabled) |
228 | writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); | 228 | writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); |
229 | 229 | ||
230 | raw_spin_unlock(&irq_controller_lock); | 230 | raw_spin_unlock(&irq_controller_lock); |
231 | 231 | ||
232 | return 0; | 232 | return 0; |
233 | } | 233 | } |
234 | 234 | ||
235 | static int gic_retrigger(struct irq_data *d) | 235 | static int gic_retrigger(struct irq_data *d) |
236 | { | 236 | { |
237 | if (gic_arch_extn.irq_retrigger) | 237 | if (gic_arch_extn.irq_retrigger) |
238 | return gic_arch_extn.irq_retrigger(d); | 238 | return gic_arch_extn.irq_retrigger(d); |
239 | 239 | ||
240 | /* the genirq layer expects 0 if we can't retrigger in hardware */ | 240 | /* the genirq layer expects 0 if we can't retrigger in hardware */ |
241 | return 0; | 241 | return 0; |
242 | } | 242 | } |
243 | 243 | ||
244 | #ifdef CONFIG_SMP | 244 | #ifdef CONFIG_SMP |
245 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | 245 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
246 | bool force) | 246 | bool force) |
247 | { | 247 | { |
248 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | 248 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
249 | unsigned int shift = (gic_irq(d) % 4) * 8; | 249 | unsigned int cpu, shift = (gic_irq(d) % 4) * 8; |
250 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); | ||
251 | u32 val, mask, bit; | 250 | u32 val, mask, bit; |
|| 251 ||
|| 252 | if (!force) ||
|| 253 | cpu = cpumask_any_and(mask_val, cpu_online_mask); ||
|| 254 | else ||
|| 255 | cpu = cpumask_first(mask_val); ||
252 | 256 | ||
253 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) | 257 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) |
254 | return -EINVAL; | 258 | return -EINVAL; |
255 | 259 | ||
256 | raw_spin_lock(&irq_controller_lock); | 260 | raw_spin_lock(&irq_controller_lock); |
257 | mask = 0xff << shift; | 261 | mask = 0xff << shift; |
258 | bit = gic_cpu_map[cpu] << shift; | 262 | bit = gic_cpu_map[cpu] << shift; |
259 | val = readl_relaxed(reg) & ~mask; | 263 | val = readl_relaxed(reg) & ~mask; |
260 | writel_relaxed(val | bit, reg); | 264 | writel_relaxed(val | bit, reg); |
261 | raw_spin_unlock(&irq_controller_lock); | 265 | raw_spin_unlock(&irq_controller_lock); |
262 | 266 | ||
263 | return IRQ_SET_MASK_OK; | 267 | return IRQ_SET_MASK_OK; |
264 | } | 268 | } |
265 | #endif | 269 | #endif |
266 | 270 | ||
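The force path above is the point of this series: during early bringup a CPU is not yet in cpu_online_mask, so the normal cpumask_any_and() lookup fails, while the forced path simply takes cpumask_first(mask_val). A minimal sketch of how a per-cpu timer driver would use the irq_force_affinity() helper added by the genirq patch in this series (the bringup hook and "evt_irq" are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/cpumask.h>

	static void my_evt_setup_on_starting_cpu(unsigned int cpu, int evt_irq)
	{
		/*
		 * The CPU is not online yet, so plain irq_set_affinity()
		 * would reject the mask; the force variant skips the online
		 * check and the GIC picks cpumask_first(mask) as seen above.
		 */
		if (irq_force_affinity(evt_irq, cpumask_of(cpu)))
			pr_warn("IRQ%d: could not force affinity to CPU%u\n",
				evt_irq, cpu);
	}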
267 | #ifdef CONFIG_PM | 271 | #ifdef CONFIG_PM |
268 | static int gic_set_wake(struct irq_data *d, unsigned int on) | 272 | static int gic_set_wake(struct irq_data *d, unsigned int on) |
269 | { | 273 | { |
270 | int ret = -ENXIO; | 274 | int ret = -ENXIO; |
271 | 275 | ||
272 | if (gic_arch_extn.irq_set_wake) | 276 | if (gic_arch_extn.irq_set_wake) |
273 | ret = gic_arch_extn.irq_set_wake(d, on); | 277 | ret = gic_arch_extn.irq_set_wake(d, on); |
274 | 278 | ||
275 | return ret; | 279 | return ret; |
276 | } | 280 | } |
277 | 281 | ||
278 | #else | 282 | #else |
279 | #define gic_set_wake NULL | 283 | #define gic_set_wake NULL |
280 | #endif | 284 | #endif |
281 | 285 | ||
282 | static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) | 286 | static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) |
283 | { | 287 | { |
284 | u32 irqstat, irqnr; | 288 | u32 irqstat, irqnr; |
285 | struct gic_chip_data *gic = &gic_data[0]; | 289 | struct gic_chip_data *gic = &gic_data[0]; |
286 | void __iomem *cpu_base = gic_data_cpu_base(gic); | 290 | void __iomem *cpu_base = gic_data_cpu_base(gic); |
287 | 291 | ||
288 | do { | 292 | do { |
289 | irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); | 293 | irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); |
290 | irqnr = irqstat & ~0x1c00; | 294 | irqnr = irqstat & ~0x1c00; |
291 | 295 | ||
292 | if (likely(irqnr > 15 && irqnr < 1021)) { | 296 | if (likely(irqnr > 15 && irqnr < 1021)) { |
293 | irqnr = irq_find_mapping(gic->domain, irqnr); | 297 | irqnr = irq_find_mapping(gic->domain, irqnr); |
294 | handle_IRQ(irqnr, regs); | 298 | handle_IRQ(irqnr, regs); |
295 | continue; | 299 | continue; |
296 | } | 300 | } |
297 | if (irqnr < 16) { | 301 | if (irqnr < 16) { |
298 | writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); | 302 | writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); |
299 | #ifdef CONFIG_SMP | 303 | #ifdef CONFIG_SMP |
300 | handle_IPI(irqnr, regs); | 304 | handle_IPI(irqnr, regs); |
301 | #endif | 305 | #endif |
302 | continue; | 306 | continue; |
303 | } | 307 | } |
304 | break; | 308 | break; |
305 | } while (1); | 309 | } while (1); |
306 | } | 310 | } |
307 | 311 | ||
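For reference, the acknowledge value read from GIC_CPU_INTACK packs two fields on GICv1/v2: bits [9:0] carry the interrupt ID and, for SGIs, bits [12:10] carry the source CPU interface, which is why the loop above masks with ~0x1c00 before the range checks. A small decode sketch under that register-layout assumption:

	#include <linux/types.h>

	static inline u32 gic_iar_irqnr(u32 irqstat)
	{
		return irqstat & 0x3ff;		/* bits [9:0]: interrupt ID */
	}

	static inline u32 gic_iar_srccpu(u32 irqstat)
	{
		return (irqstat >> 10) & 0x7;	/* bits [12:10]: SGI source CPU */
	}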
308 | static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) | 312 | static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) |
309 | { | 313 | { |
310 | struct gic_chip_data *chip_data = irq_get_handler_data(irq); | 314 | struct gic_chip_data *chip_data = irq_get_handler_data(irq); |
311 | struct irq_chip *chip = irq_get_chip(irq); | 315 | struct irq_chip *chip = irq_get_chip(irq); |
312 | unsigned int cascade_irq, gic_irq; | 316 | unsigned int cascade_irq, gic_irq; |
313 | unsigned long status; | 317 | unsigned long status; |
314 | 318 | ||
315 | chained_irq_enter(chip, desc); | 319 | chained_irq_enter(chip, desc); |
316 | 320 | ||
317 | raw_spin_lock(&irq_controller_lock); | 321 | raw_spin_lock(&irq_controller_lock); |
318 | status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK); | 322 | status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK); |
319 | raw_spin_unlock(&irq_controller_lock); | 323 | raw_spin_unlock(&irq_controller_lock); |
320 | 324 | ||
321 | gic_irq = (status & 0x3ff); | 325 | gic_irq = (status & 0x3ff); |
322 | if (gic_irq == 1023) | 326 | if (gic_irq == 1023) |
323 | goto out; | 327 | goto out; |
324 | 328 | ||
325 | cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); | 329 | cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); |
326 | if (unlikely(gic_irq < 32 || gic_irq > 1020)) | 330 | if (unlikely(gic_irq < 32 || gic_irq > 1020)) |
327 | handle_bad_irq(cascade_irq, desc); | 331 | handle_bad_irq(cascade_irq, desc); |
328 | else | 332 | else |
329 | generic_handle_irq(cascade_irq); | 333 | generic_handle_irq(cascade_irq); |
330 | 334 | ||
331 | out: | 335 | out: |
332 | chained_irq_exit(chip, desc); | 336 | chained_irq_exit(chip, desc); |
333 | } | 337 | } |
334 | 338 | ||
335 | static struct irq_chip gic_chip = { | 339 | static struct irq_chip gic_chip = { |
336 | .name = "GIC", | 340 | .name = "GIC", |
337 | .irq_mask = gic_mask_irq, | 341 | .irq_mask = gic_mask_irq, |
338 | .irq_unmask = gic_unmask_irq, | 342 | .irq_unmask = gic_unmask_irq, |
339 | .irq_eoi = gic_eoi_irq, | 343 | .irq_eoi = gic_eoi_irq, |
340 | .irq_set_type = gic_set_type, | 344 | .irq_set_type = gic_set_type, |
341 | .irq_retrigger = gic_retrigger, | 345 | .irq_retrigger = gic_retrigger, |
342 | #ifdef CONFIG_SMP | 346 | #ifdef CONFIG_SMP |
343 | .irq_set_affinity = gic_set_affinity, | 347 | .irq_set_affinity = gic_set_affinity, |
344 | #endif | 348 | #endif |
345 | .irq_set_wake = gic_set_wake, | 349 | .irq_set_wake = gic_set_wake, |
346 | }; | 350 | }; |
347 | 351 | ||
348 | void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) | 352 | void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) |
349 | { | 353 | { |
350 | if (gic_nr >= MAX_GIC_NR) | 354 | if (gic_nr >= MAX_GIC_NR) |
351 | BUG(); | 355 | BUG(); |
352 | if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0) | 356 | if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0) |
353 | BUG(); | 357 | BUG(); |
354 | irq_set_chained_handler(irq, gic_handle_cascade_irq); | 358 | irq_set_chained_handler(irq, gic_handle_cascade_irq); |
355 | } | 359 | } |
356 | 360 | ||
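A board with a second GIC would register it and hook it behind a parent SPI roughly like this (the instance number and parent IRQ are made up for illustration):

	#include <linux/irqchip/arm-gic.h>

	static void __init board_init_secondary_gic(void __iomem *dist2,
						    void __iomem *cpu2)
	{
		/* GIC instance 1, no legacy irq_start, no percpu offset */
		gic_init_bases(1, -1, dist2, cpu2, 0, NULL);
		/* deliver its output through (hypothetical) primary IRQ 64 */
		gic_cascade_irq(1, 64);
	}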
357 | static u8 gic_get_cpumask(struct gic_chip_data *gic) | 361 | static u8 gic_get_cpumask(struct gic_chip_data *gic) |
358 | { | 362 | { |
359 | void __iomem *base = gic_data_dist_base(gic); | 363 | void __iomem *base = gic_data_dist_base(gic); |
360 | u32 mask, i; | 364 | u32 mask, i; |
361 | 365 | ||
362 | for (i = mask = 0; i < 32; i += 4) { | 366 | for (i = mask = 0; i < 32; i += 4) { |
363 | mask = readl_relaxed(base + GIC_DIST_TARGET + i); | 367 | mask = readl_relaxed(base + GIC_DIST_TARGET + i); |
364 | mask |= mask >> 16; | 368 | mask |= mask >> 16; |
365 | mask |= mask >> 8; | 369 | mask |= mask >> 8; |
366 | if (mask) | 370 | if (mask) |
367 | break; | 371 | break; |
368 | } | 372 | } |
369 | 373 | ||
370 | if (!mask) | 374 | if (!mask) |
371 | pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); | 375 | pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); |
372 | 376 | ||
373 | return mask; | 377 | return mask; |
374 | } | 378 | } |
375 | 379 | ||
376 | static void __init gic_dist_init(struct gic_chip_data *gic) | 380 | static void __init gic_dist_init(struct gic_chip_data *gic) |
377 | { | 381 | { |
378 | unsigned int i; | 382 | unsigned int i; |
379 | u32 cpumask; | 383 | u32 cpumask; |
380 | unsigned int gic_irqs = gic->gic_irqs; | 384 | unsigned int gic_irqs = gic->gic_irqs; |
381 | void __iomem *base = gic_data_dist_base(gic); | 385 | void __iomem *base = gic_data_dist_base(gic); |
382 | 386 | ||
383 | writel_relaxed(0, base + GIC_DIST_CTRL); | 387 | writel_relaxed(0, base + GIC_DIST_CTRL); |
384 | 388 | ||
385 | /* | 389 | /* |
386 | * Set all global interrupts to be level triggered, active low. | 390 | * Set all global interrupts to be level triggered, active low. |
387 | */ | 391 | */ |
388 | for (i = 32; i < gic_irqs; i += 16) | 392 | for (i = 32; i < gic_irqs; i += 16) |
389 | writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16); | 393 | writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16); |
390 | 394 | ||
391 | /* | 395 | /* |
392 | * Set all global interrupts to this CPU only. | 396 | * Set all global interrupts to this CPU only. |
393 | */ | 397 | */ |
394 | cpumask = gic_get_cpumask(gic); | 398 | cpumask = gic_get_cpumask(gic); |
395 | cpumask |= cpumask << 8; | 399 | cpumask |= cpumask << 8; |
396 | cpumask |= cpumask << 16; | 400 | cpumask |= cpumask << 16; |
397 | for (i = 32; i < gic_irqs; i += 4) | 401 | for (i = 32; i < gic_irqs; i += 4) |
398 | writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); | 402 | writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); |
399 | 403 | ||
400 | /* | 404 | /* |
401 | * Set priority on all global interrupts. | 405 | * Set priority on all global interrupts. |
402 | */ | 406 | */ |
403 | for (i = 32; i < gic_irqs; i += 4) | 407 | for (i = 32; i < gic_irqs; i += 4) |
404 | writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); | 408 | writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); |
405 | 409 | ||
406 | /* | 410 | /* |
407 | * Disable all interrupts. Leave the PPI and SGIs alone | 411 | * Disable all interrupts. Leave the PPI and SGIs alone |
408 | * as these enables are banked registers. | 412 | * as these enables are banked registers. |
409 | */ | 413 | */ |
410 | for (i = 32; i < gic_irqs; i += 32) | 414 | for (i = 32; i < gic_irqs; i += 32) |
411 | writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); | 415 | writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); |
412 | 416 | ||
413 | writel_relaxed(1, base + GIC_DIST_CTRL); | 417 | writel_relaxed(1, base + GIC_DIST_CTRL); |
414 | } | 418 | } |
415 | 419 | ||
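The `i * 4 / 16`-style arithmetic above encodes how the distributor packs state: GIC_DIST_CONFIG uses 2 bits per interrupt (16 per 32-bit word), GIC_DIST_TARGET and GIC_DIST_PRI use 8 bits (4 per word), and the enable registers use 1 bit (32 per word). A generic sketch of the byte offset and bit shift for a single hwirq, under those packing assumptions:

	#include <linux/types.h>

	/* per_reg = how many irqs one 32-bit register covers: 16, 4 or 32 */
	static inline u32 gic_reg_offset(unsigned int hwirq, unsigned int per_reg)
	{
		return (hwirq / per_reg) * 4;			/* byte offset */
	}

	static inline u32 gic_reg_shift(unsigned int hwirq, unsigned int per_reg)
	{
		return (hwirq % per_reg) * (32 / per_reg);	/* bit position */
	}
	/* e.g. hwirq 35 in GIC_DIST_TARGET (4 per reg): offset 32, shift 24 */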
416 | static void gic_cpu_init(struct gic_chip_data *gic) | 420 | static void gic_cpu_init(struct gic_chip_data *gic) |
417 | { | 421 | { |
418 | void __iomem *dist_base = gic_data_dist_base(gic); | 422 | void __iomem *dist_base = gic_data_dist_base(gic); |
419 | void __iomem *base = gic_data_cpu_base(gic); | 423 | void __iomem *base = gic_data_cpu_base(gic); |
420 | unsigned int cpu_mask, cpu = smp_processor_id(); | 424 | unsigned int cpu_mask, cpu = smp_processor_id(); |
421 | int i; | 425 | int i; |
422 | 426 | ||
423 | /* | 427 | /* |
424 | * Get what the GIC says our CPU mask is. | 428 | * Get what the GIC says our CPU mask is. |
425 | */ | 429 | */ |
426 | BUG_ON(cpu >= NR_GIC_CPU_IF); | 430 | BUG_ON(cpu >= NR_GIC_CPU_IF); |
427 | cpu_mask = gic_get_cpumask(gic); | 431 | cpu_mask = gic_get_cpumask(gic); |
428 | gic_cpu_map[cpu] = cpu_mask; | 432 | gic_cpu_map[cpu] = cpu_mask; |
429 | 433 | ||
430 | /* | 434 | /* |
431 | * Clear our mask from the other map entries in case they're | 435 | * Clear our mask from the other map entries in case they're |
432 | * still undefined. | 436 | * still undefined. |
433 | */ | 437 | */ |
434 | for (i = 0; i < NR_GIC_CPU_IF; i++) | 438 | for (i = 0; i < NR_GIC_CPU_IF; i++) |
435 | if (i != cpu) | 439 | if (i != cpu) |
436 | gic_cpu_map[i] &= ~cpu_mask; | 440 | gic_cpu_map[i] &= ~cpu_mask; |
437 | 441 | ||
438 | /* | 442 | /* |
439 | * Deal with the banked PPI and SGI interrupts - disable all | 443 | * Deal with the banked PPI and SGI interrupts - disable all |
440 | * PPI interrupts, ensure all SGI interrupts are enabled. | 444 | * PPI interrupts, ensure all SGI interrupts are enabled. |
441 | */ | 445 | */ |
442 | writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR); | 446 | writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR); |
443 | writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET); | 447 | writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET); |
444 | 448 | ||
445 | /* | 449 | /* |
446 | * Set priority on PPI and SGI interrupts | 450 | * Set priority on PPI and SGI interrupts |
447 | */ | 451 | */ |
448 | for (i = 0; i < 32; i += 4) | 452 | for (i = 0; i < 32; i += 4) |
449 | writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); | 453 | writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); |
450 | 454 | ||
451 | writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); | 455 | writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); |
452 | writel_relaxed(1, base + GIC_CPU_CTRL); | 456 | writel_relaxed(1, base + GIC_CPU_CTRL); |
453 | } | 457 | } |
454 | 458 | ||
455 | void gic_cpu_if_down(void) | 459 | void gic_cpu_if_down(void) |
456 | { | 460 | { |
457 | void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]); | 461 | void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]); |
458 | writel_relaxed(0, cpu_base + GIC_CPU_CTRL); | 462 | writel_relaxed(0, cpu_base + GIC_CPU_CTRL); |
459 | } | 463 | } |
460 | 464 | ||
461 | #ifdef CONFIG_CPU_PM | 465 | #ifdef CONFIG_CPU_PM |
462 | /* | 466 | /* |
463 | * Saves the GIC distributor registers during suspend or idle. Must be called | 467 | * Saves the GIC distributor registers during suspend or idle. Must be called |
464 | * with interrupts disabled but before powering down the GIC. After calling | 468 | * with interrupts disabled but before powering down the GIC. After calling |
465 | * this function, no interrupts will be delivered by the GIC, and another | 469 | * this function, no interrupts will be delivered by the GIC, and another |
466 | * platform-specific wakeup source must be enabled. | 470 | * platform-specific wakeup source must be enabled. |
467 | */ | 471 | */ |
468 | static void gic_dist_save(unsigned int gic_nr) | 472 | static void gic_dist_save(unsigned int gic_nr) |
469 | { | 473 | { |
470 | unsigned int gic_irqs; | 474 | unsigned int gic_irqs; |
471 | void __iomem *dist_base; | 475 | void __iomem *dist_base; |
472 | int i; | 476 | int i; |
473 | 477 | ||
474 | if (gic_nr >= MAX_GIC_NR) | 478 | if (gic_nr >= MAX_GIC_NR) |
475 | BUG(); | 479 | BUG(); |
476 | 480 | ||
477 | gic_irqs = gic_data[gic_nr].gic_irqs; | 481 | gic_irqs = gic_data[gic_nr].gic_irqs; |
478 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); | 482 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); |
479 | 483 | ||
480 | if (!dist_base) | 484 | if (!dist_base) |
481 | return; | 485 | return; |
482 | 486 | ||
483 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) | 487 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) |
484 | gic_data[gic_nr].saved_spi_conf[i] = | 488 | gic_data[gic_nr].saved_spi_conf[i] = |
485 | readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | 489 | readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); |
486 | 490 | ||
487 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | 491 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) |
488 | gic_data[gic_nr].saved_spi_target[i] = | 492 | gic_data[gic_nr].saved_spi_target[i] = |
489 | readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); | 493 | readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); |
490 | 494 | ||
491 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | 495 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) |
492 | gic_data[gic_nr].saved_spi_enable[i] = | 496 | gic_data[gic_nr].saved_spi_enable[i] = |
493 | readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | 497 | readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); |
494 | } | 498 | } |
495 | 499 | ||
496 | /* | 500 | /* |
497 | * Restores the GIC distributor registers during resume or when coming out of | 501 | * Restores the GIC distributor registers during resume or when coming out of |
498 | * idle. Must be called before enabling interrupts. If a level interrupt | 502 | * idle. Must be called before enabling interrupts. If a level interrupt |
499 | * that occurred while the GIC was suspended is still present, it will be | 503 | * that occurred while the GIC was suspended is still present, it will be |
500 | * handled normally, but any edge interrupts that occurred will not be seen by | 504 | * handled normally, but any edge interrupts that occurred will not be seen by |
501 | * the GIC and need to be handled by the platform-specific wakeup source. | 505 | * the GIC and need to be handled by the platform-specific wakeup source. |
502 | */ | 506 | */ |
503 | static void gic_dist_restore(unsigned int gic_nr) | 507 | static void gic_dist_restore(unsigned int gic_nr) |
504 | { | 508 | { |
505 | unsigned int gic_irqs; | 509 | unsigned int gic_irqs; |
506 | unsigned int i; | 510 | unsigned int i; |
507 | void __iomem *dist_base; | 511 | void __iomem *dist_base; |
508 | 512 | ||
509 | if (gic_nr >= MAX_GIC_NR) | 513 | if (gic_nr >= MAX_GIC_NR) |
510 | BUG(); | 514 | BUG(); |
511 | 515 | ||
512 | gic_irqs = gic_data[gic_nr].gic_irqs; | 516 | gic_irqs = gic_data[gic_nr].gic_irqs; |
513 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); | 517 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); |
514 | 518 | ||
515 | if (!dist_base) | 519 | if (!dist_base) |
516 | return; | 520 | return; |
517 | 521 | ||
518 | writel_relaxed(0, dist_base + GIC_DIST_CTRL); | 522 | writel_relaxed(0, dist_base + GIC_DIST_CTRL); |
519 | 523 | ||
520 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) | 524 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) |
521 | writel_relaxed(gic_data[gic_nr].saved_spi_conf[i], | 525 | writel_relaxed(gic_data[gic_nr].saved_spi_conf[i], |
522 | dist_base + GIC_DIST_CONFIG + i * 4); | 526 | dist_base + GIC_DIST_CONFIG + i * 4); |
523 | 527 | ||
524 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | 528 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) |
525 | writel_relaxed(0xa0a0a0a0, | 529 | writel_relaxed(0xa0a0a0a0, |
526 | dist_base + GIC_DIST_PRI + i * 4); | 530 | dist_base + GIC_DIST_PRI + i * 4); |
527 | 531 | ||
528 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | 532 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) |
529 | writel_relaxed(gic_data[gic_nr].saved_spi_target[i], | 533 | writel_relaxed(gic_data[gic_nr].saved_spi_target[i], |
530 | dist_base + GIC_DIST_TARGET + i * 4); | 534 | dist_base + GIC_DIST_TARGET + i * 4); |
531 | 535 | ||
532 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | 536 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) |
533 | writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], | 537 | writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], |
534 | dist_base + GIC_DIST_ENABLE_SET + i * 4); | 538 | dist_base + GIC_DIST_ENABLE_SET + i * 4); |
535 | 539 | ||
536 | writel_relaxed(1, dist_base + GIC_DIST_CTRL); | 540 | writel_relaxed(1, dist_base + GIC_DIST_CTRL); |
537 | } | 541 | } |
538 | 542 | ||
539 | static void gic_cpu_save(unsigned int gic_nr) | 543 | static void gic_cpu_save(unsigned int gic_nr) |
540 | { | 544 | { |
541 | int i; | 545 | int i; |
542 | u32 *ptr; | 546 | u32 *ptr; |
543 | void __iomem *dist_base; | 547 | void __iomem *dist_base; |
544 | void __iomem *cpu_base; | 548 | void __iomem *cpu_base; |
545 | 549 | ||
546 | if (gic_nr >= MAX_GIC_NR) | 550 | if (gic_nr >= MAX_GIC_NR) |
547 | BUG(); | 551 | BUG(); |
548 | 552 | ||
549 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); | 553 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); |
550 | cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); | 554 | cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); |
551 | 555 | ||
552 | if (!dist_base || !cpu_base) | 556 | if (!dist_base || !cpu_base) |
553 | return; | 557 | return; |
554 | 558 | ||
555 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | 559 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); |
556 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | 560 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) |
557 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | 561 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); |
558 | 562 | ||
559 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | 563 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); |
560 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | 564 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) |
561 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | 565 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); |
562 | 566 | ||
563 | } | 567 | } |
564 | 568 | ||
565 | static void gic_cpu_restore(unsigned int gic_nr) | 569 | static void gic_cpu_restore(unsigned int gic_nr) |
566 | { | 570 | { |
567 | int i; | 571 | int i; |
568 | u32 *ptr; | 572 | u32 *ptr; |
569 | void __iomem *dist_base; | 573 | void __iomem *dist_base; |
570 | void __iomem *cpu_base; | 574 | void __iomem *cpu_base; |
571 | 575 | ||
572 | if (gic_nr >= MAX_GIC_NR) | 576 | if (gic_nr >= MAX_GIC_NR) |
573 | BUG(); | 577 | BUG(); |
574 | 578 | ||
575 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); | 579 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); |
576 | cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); | 580 | cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); |
577 | 581 | ||
578 | if (!dist_base || !cpu_base) | 582 | if (!dist_base || !cpu_base) |
579 | return; | 583 | return; |
580 | 584 | ||
581 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | 585 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); |
582 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | 586 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) |
583 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); | 587 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); |
584 | 588 | ||
585 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | 589 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); |
586 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | 590 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) |
587 | writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); | 591 | writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); |
588 | 592 | ||
589 | for (i = 0; i < DIV_ROUND_UP(32, 4); i++) | 593 | for (i = 0; i < DIV_ROUND_UP(32, 4); i++) |
590 | writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4); | 594 | writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4); |
591 | 595 | ||
592 | writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK); | 596 | writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK); |
593 | writel_relaxed(1, cpu_base + GIC_CPU_CTRL); | 597 | writel_relaxed(1, cpu_base + GIC_CPU_CTRL); |
594 | } | 598 | } |
595 | 599 | ||
596 | static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) | 600 | static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) |
597 | { | 601 | { |
598 | int i; | 602 | int i; |
599 | 603 | ||
600 | for (i = 0; i < MAX_GIC_NR; i++) { | 604 | for (i = 0; i < MAX_GIC_NR; i++) { |
601 | #ifdef CONFIG_GIC_NON_BANKED | 605 | #ifdef CONFIG_GIC_NON_BANKED |
602 | /* Skip over unused GICs */ | 606 | /* Skip over unused GICs */ |
603 | if (!gic_data[i].get_base) | 607 | if (!gic_data[i].get_base) |
604 | continue; | 608 | continue; |
605 | #endif | 609 | #endif |
606 | switch (cmd) { | 610 | switch (cmd) { |
607 | case CPU_PM_ENTER: | 611 | case CPU_PM_ENTER: |
608 | gic_cpu_save(i); | 612 | gic_cpu_save(i); |
609 | break; | 613 | break; |
610 | case CPU_PM_ENTER_FAILED: | 614 | case CPU_PM_ENTER_FAILED: |
611 | case CPU_PM_EXIT: | 615 | case CPU_PM_EXIT: |
612 | gic_cpu_restore(i); | 616 | gic_cpu_restore(i); |
613 | break; | 617 | break; |
614 | case CPU_CLUSTER_PM_ENTER: | 618 | case CPU_CLUSTER_PM_ENTER: |
615 | gic_dist_save(i); | 619 | gic_dist_save(i); |
616 | break; | 620 | break; |
617 | case CPU_CLUSTER_PM_ENTER_FAILED: | 621 | case CPU_CLUSTER_PM_ENTER_FAILED: |
618 | case CPU_CLUSTER_PM_EXIT: | 622 | case CPU_CLUSTER_PM_EXIT: |
619 | gic_dist_restore(i); | 623 | gic_dist_restore(i); |
620 | break; | 624 | break; |
621 | } | 625 | } |
622 | } | 626 | } |
623 | 627 | ||
624 | return NOTIFY_OK; | 628 | return NOTIFY_OK; |
625 | } | 629 | } |
626 | 630 | ||
627 | static struct notifier_block gic_notifier_block = { | 631 | static struct notifier_block gic_notifier_block = { |
628 | .notifier_call = gic_notifier, | 632 | .notifier_call = gic_notifier, |
629 | }; | 633 | }; |
630 | 634 | ||
631 | static void __init gic_pm_init(struct gic_chip_data *gic) | 635 | static void __init gic_pm_init(struct gic_chip_data *gic) |
632 | { | 636 | { |
633 | gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, | 637 | gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, |
634 | sizeof(u32)); | 638 | sizeof(u32)); |
635 | BUG_ON(!gic->saved_ppi_enable); | 639 | BUG_ON(!gic->saved_ppi_enable); |
636 | 640 | ||
637 | gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, | 641 | gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, |
638 | sizeof(u32)); | 642 | sizeof(u32)); |
639 | BUG_ON(!gic->saved_ppi_conf); | 643 | BUG_ON(!gic->saved_ppi_conf); |
640 | 644 | ||
641 | if (gic == &gic_data[0]) | 645 | if (gic == &gic_data[0]) |
642 | cpu_pm_register_notifier(&gic_notifier_block); | 646 | cpu_pm_register_notifier(&gic_notifier_block); |
643 | } | 647 | } |
644 | #else | 648 | #else |
645 | static void __init gic_pm_init(struct gic_chip_data *gic) | 649 | static void __init gic_pm_init(struct gic_chip_data *gic) |
646 | { | 650 | { |
647 | } | 651 | } |
648 | #endif | 652 | #endif |
649 | 653 | ||
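Any driver whose registers live in the same power domain can hook the identical save/restore pattern; a minimal notifier sketch (the save/restore helpers are hypothetical):

	#include <linux/cpu_pm.h>
	#include <linux/notifier.h>

	static int my_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
				    void *v)
	{
		switch (cmd) {
		case CPU_PM_ENTER:
			/* my_save_context(); */
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			/* my_restore_context(); */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_cpu_pm_nb = {
		.notifier_call = my_cpu_pm_notify,
	};
	/* register from init code with cpu_pm_register_notifier(&my_cpu_pm_nb) */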
650 | #ifdef CONFIG_SMP | 654 | #ifdef CONFIG_SMP |
651 | static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | 655 | static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) |
652 | { | 656 | { |
653 | int cpu; | 657 | int cpu; |
654 | unsigned long flags, map = 0; | 658 | unsigned long flags, map = 0; |
655 | 659 | ||
656 | raw_spin_lock_irqsave(&irq_controller_lock, flags); | 660 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
657 | 661 | ||
658 | /* Convert our logical CPU mask into a physical one. */ | 662 | /* Convert our logical CPU mask into a physical one. */ |
659 | for_each_cpu(cpu, mask) | 663 | for_each_cpu(cpu, mask) |
660 | map |= gic_cpu_map[cpu]; | 664 | map |= gic_cpu_map[cpu]; |
661 | 665 | ||
662 | /* | 666 | /* |
663 | * Ensure that stores to Normal memory are visible to the | 667 | * Ensure that stores to Normal memory are visible to the |
664 | * other CPUs before they observe us issuing the IPI. | 668 | * other CPUs before they observe us issuing the IPI. |
665 | */ | 669 | */ |
666 | dmb(ishst); | 670 | dmb(ishst); |
667 | 671 | ||
668 | /* this always happens on GIC0 */ | 672 | /* this always happens on GIC0 */ |
669 | writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); | 673 | writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); |
670 | 674 | ||
671 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); | 675 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
672 | } | 676 | } |
673 | #endif | 677 | #endif |
674 | 678 | ||
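The `map << 16 | irq` write builds a GICD_SGIR value: the CPU target list lives in bits [23:16] and the SGI number in bits [3:0], with the target-list filter bits [25:24] left at 0 so the list is used. A worked encoding under that layout:

	#include <linux/types.h>

	static inline u32 gic_sgir_val(u8 target_map, u8 sgi)
	{
		return ((u32)target_map << 16) | (sgi & 0xf);
	}
	/* gic_sgir_val(0x05, 1) == 0x00050001: SGI 1 to interfaces 0 and 2 */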
675 | #ifdef CONFIG_BL_SWITCHER | 679 | #ifdef CONFIG_BL_SWITCHER |
676 | /* | 680 | /* |
677 | * gic_send_sgi - send an SGI directly to a given CPU interface number | 681 | * gic_send_sgi - send an SGI directly to a given CPU interface number |
678 | * | 682 | * |
679 | * cpu_id: the ID for the destination CPU interface | 683 | * cpu_id: the ID for the destination CPU interface |
680 | * irq: the IPI number to send a SGI for | 684 | * irq: the IPI number to send a SGI for |
681 | */ | 685 | */ |
682 | void gic_send_sgi(unsigned int cpu_id, unsigned int irq) | 686 | void gic_send_sgi(unsigned int cpu_id, unsigned int irq) |
683 | { | 687 | { |
684 | BUG_ON(cpu_id >= NR_GIC_CPU_IF); | 688 | BUG_ON(cpu_id >= NR_GIC_CPU_IF); |
685 | cpu_id = 1 << cpu_id; | 689 | cpu_id = 1 << cpu_id; |
686 | /* this always happens on GIC0 */ | 690 | /* this always happens on GIC0 */ |
687 | writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); | 691 | writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); |
688 | } | 692 | } |
689 | 693 | ||
690 | /* | 694 | /* |
691 | * gic_get_cpu_id - get the CPU interface ID for the specified CPU | 695 | * gic_get_cpu_id - get the CPU interface ID for the specified CPU |
692 | * | 696 | * |
693 | * @cpu: the logical CPU number to get the GIC ID for. | 697 | * @cpu: the logical CPU number to get the GIC ID for. |
694 | * | 698 | * |
695 | * Return the CPU interface ID for the given logical CPU number, | 699 | * Return the CPU interface ID for the given logical CPU number, |
696 | * or -1 if the CPU number is too large or the interface ID is | 700 | * or -1 if the CPU number is too large or the interface ID is |
697 | * unknown (more than one bit set). | 701 | * unknown (more than one bit set). |
698 | */ | 702 | */ |
699 | int gic_get_cpu_id(unsigned int cpu) | 703 | int gic_get_cpu_id(unsigned int cpu) |
700 | { | 704 | { |
701 | unsigned int cpu_bit; | 705 | unsigned int cpu_bit; |
702 | 706 | ||
703 | if (cpu >= NR_GIC_CPU_IF) | 707 | if (cpu >= NR_GIC_CPU_IF) |
704 | return -1; | 708 | return -1; |
705 | cpu_bit = gic_cpu_map[cpu]; | 709 | cpu_bit = gic_cpu_map[cpu]; |
706 | if (cpu_bit & (cpu_bit - 1)) | 710 | if (cpu_bit & (cpu_bit - 1)) |
707 | return -1; | 711 | return -1; |
708 | return __ffs(cpu_bit); | 712 | return __ffs(cpu_bit); |
709 | } | 713 | } |
710 | 714 | ||
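The `cpu_bit & (cpu_bit - 1)` test clears the lowest set bit, so it is non-zero exactly when more than one interface bit is set and the ID would be ambiguous; __ffs() then recovers the index from a single-bit mask. For instance:

	#include <linux/types.h>
	#include <linux/bitops.h>

	static bool single_bit_set(unsigned int x)
	{
		/* 0x10 & 0x0f == 0    -> one bit,  __ffs(0x10) == 4
		 * 0x0c & 0x0b == 0x08 -> two bits, ID is ambiguous */
		return x && !(x & (x - 1));
	}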
711 | /* | 715 | /* |
712 | * gic_migrate_target - migrate IRQs to another CPU interface | 716 | * gic_migrate_target - migrate IRQs to another CPU interface |
713 | * | 717 | * |
714 | * @new_cpu_id: the CPU target ID to migrate IRQs to | 718 | * @new_cpu_id: the CPU target ID to migrate IRQs to |
715 | * | 719 | * |
716 | * Migrate all peripheral interrupts with a target matching the current CPU | 720 | * Migrate all peripheral interrupts with a target matching the current CPU |
717 | * to the interface corresponding to @new_cpu_id. The CPU interface mapping | 721 | * to the interface corresponding to @new_cpu_id. The CPU interface mapping |
718 | * is also updated. Targets to other CPU interfaces are unchanged. | 722 | * is also updated. Targets to other CPU interfaces are unchanged. |
719 | * This must be called with IRQs locally disabled. | 723 | * This must be called with IRQs locally disabled. |
720 | */ | 724 | */ |
721 | void gic_migrate_target(unsigned int new_cpu_id) | 725 | void gic_migrate_target(unsigned int new_cpu_id) |
722 | { | 726 | { |
723 | unsigned int cur_cpu_id, gic_irqs, gic_nr = 0; | 727 | unsigned int cur_cpu_id, gic_irqs, gic_nr = 0; |
724 | void __iomem *dist_base; | 728 | void __iomem *dist_base; |
725 | int i, ror_val, cpu = smp_processor_id(); | 729 | int i, ror_val, cpu = smp_processor_id(); |
726 | u32 val, cur_target_mask, active_mask; | 730 | u32 val, cur_target_mask, active_mask; |
727 | 731 | ||
728 | if (gic_nr >= MAX_GIC_NR) | 732 | if (gic_nr >= MAX_GIC_NR) |
729 | BUG(); | 733 | BUG(); |
730 | 734 | ||
731 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); | 735 | dist_base = gic_data_dist_base(&gic_data[gic_nr]); |
732 | if (!dist_base) | 736 | if (!dist_base) |
733 | return; | 737 | return; |
734 | gic_irqs = gic_data[gic_nr].gic_irqs; | 738 | gic_irqs = gic_data[gic_nr].gic_irqs; |
735 | 739 | ||
736 | cur_cpu_id = __ffs(gic_cpu_map[cpu]); | 740 | cur_cpu_id = __ffs(gic_cpu_map[cpu]); |
737 | cur_target_mask = 0x01010101 << cur_cpu_id; | 741 | cur_target_mask = 0x01010101 << cur_cpu_id; |
738 | ror_val = (cur_cpu_id - new_cpu_id) & 31; | 742 | ror_val = (cur_cpu_id - new_cpu_id) & 31; |
739 | 743 | ||
740 | raw_spin_lock(&irq_controller_lock); | 744 | raw_spin_lock(&irq_controller_lock); |
741 | 745 | ||
742 | /* Update the target interface for this logical CPU */ | 746 | /* Update the target interface for this logical CPU */ |
743 | gic_cpu_map[cpu] = 1 << new_cpu_id; | 747 | gic_cpu_map[cpu] = 1 << new_cpu_id; |
744 | 748 | ||
745 | /* | 749 | /* |
746 | * Find all the peripheral interrupts targeting the current | 750 | * Find all the peripheral interrupts targeting the current |
747 | * CPU interface and migrate them to the new CPU interface. | 751 | * CPU interface and migrate them to the new CPU interface. |
748 | * We skip DIST_TARGET 0 to 7 as they are read-only. | 752 | * We skip DIST_TARGET 0 to 7 as they are read-only. |
749 | */ | 753 | */ |
750 | for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) { | 754 | for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) { |
751 | val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); | 755 | val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); |
752 | active_mask = val & cur_target_mask; | 756 | active_mask = val & cur_target_mask; |
753 | if (active_mask) { | 757 | if (active_mask) { |
754 | val &= ~active_mask; | 758 | val &= ~active_mask; |
755 | val |= ror32(active_mask, ror_val); | 759 | val |= ror32(active_mask, ror_val); |
756 | writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4); | 760 | writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4); |
757 | } | 761 | } |
758 | } | 762 | } |
759 | 763 | ||
760 | raw_spin_unlock(&irq_controller_lock); | 764 | raw_spin_unlock(&irq_controller_lock); |
761 | 765 | ||
762 | /* | 766 | /* |
763 | * Now let's migrate and clear any potential SGIs that might be | 767 | * Now let's migrate and clear any potential SGIs that might be |
764 | * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET | 768 | * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET |
765 | * is a banked register, we can only forward the SGI using | 769 | * is a banked register, we can only forward the SGI using |
766 | * GIC_DIST_SOFTINT. The original SGI source is lost but Linux | 770 | * GIC_DIST_SOFTINT. The original SGI source is lost but Linux |
767 | * doesn't use that information anyway. | 771 | * doesn't use that information anyway. |
768 | * | 772 | * |
769 | * For the same reason we do not adjust SGI source information | 773 | * For the same reason we do not adjust SGI source information |
770 | * for previously sent SGIs by us to other CPUs either. | 774 | * for previously sent SGIs by us to other CPUs either. |
771 | */ | 775 | */ |
772 | for (i = 0; i < 16; i += 4) { | 776 | for (i = 0; i < 16; i += 4) { |
773 | int j; | 777 | int j; |
774 | val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i); | 778 | val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i); |
775 | if (!val) | 779 | if (!val) |
776 | continue; | 780 | continue; |
777 | writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i); | 781 | writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i); |
778 | for (j = i; j < i + 4; j++) { | 782 | for (j = i; j < i + 4; j++) { |
779 | if (val & 0xff) | 783 | if (val & 0xff) |
780 | writel_relaxed((1 << (new_cpu_id + 16)) | j, | 784 | writel_relaxed((1 << (new_cpu_id + 16)) | j, |
781 | dist_base + GIC_DIST_SOFTINT); | 785 | dist_base + GIC_DIST_SOFTINT); |
782 | val >>= 8; | 786 | val >>= 8; |
783 | } | 787 | } |
784 | } | 788 | } |
785 | } | 789 | } |
786 | 790 | ||
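Each GIC_DIST_TARGET byte holds one interrupt's target mask, so every bit belonging to the current interface sits at position 8k + cur_cpu_id; rotating the whole word by (cur_cpu_id - new_cpu_id) & 31 moves all four bits to 8k + new_cpu_id at once. A worked case for interface 0 -> 1:

	#include <linux/types.h>
	#include <linux/bitops.h>	/* ror32() */

	static u32 migrate_word_example(void)
	{
		u32 active = 0x01010101;	/* four irqs on interface 0 */

		/* (0 - 1) & 31 == 31; ror32 by 31 == rotate left by 1 */
		return ror32(active, (0 - 1) & 31);	/* 0x02020202 -> iface 1 */
	}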
787 | /* | 791 | /* |
788 | * gic_get_sgir_physaddr - get the physical address for the SGI register | 792 | * gic_get_sgir_physaddr - get the physical address for the SGI register |
789 | * | 793 | * |
790 | * Return the physical address of the SGI register to be used | 794 | * Return the physical address of the SGI register to be used |
791 | * by some early assembly code when the kernel is not yet available. | 795 | * by some early assembly code when the kernel is not yet available. |
792 | */ | 796 | */ |
793 | static unsigned long gic_dist_physaddr; | 797 | static unsigned long gic_dist_physaddr; |
794 | 798 | ||
795 | unsigned long gic_get_sgir_physaddr(void) | 799 | unsigned long gic_get_sgir_physaddr(void) |
796 | { | 800 | { |
797 | if (!gic_dist_physaddr) | 801 | if (!gic_dist_physaddr) |
798 | return 0; | 802 | return 0; |
799 | return gic_dist_physaddr + GIC_DIST_SOFTINT; | 803 | return gic_dist_physaddr + GIC_DIST_SOFTINT; |
800 | } | 804 | } |
801 | 805 | ||
802 | void __init gic_init_physaddr(struct device_node *node) | 806 | void __init gic_init_physaddr(struct device_node *node) |
803 | { | 807 | { |
804 | struct resource res; | 808 | struct resource res; |
805 | if (of_address_to_resource(node, 0, &res) == 0) { | 809 | if (of_address_to_resource(node, 0, &res) == 0) { |
806 | gic_dist_physaddr = res.start; | 810 | gic_dist_physaddr = res.start; |
807 | pr_info("GIC physical location is %#lx\n", gic_dist_physaddr); | 811 | pr_info("GIC physical location is %#lx\n", gic_dist_physaddr); |
808 | } | 812 | } |
809 | } | 813 | } |
810 | 814 | ||
811 | #else | 815 | #else |
812 | #define gic_init_physaddr(node) do { } while (0) | 816 | #define gic_init_physaddr(node) do { } while (0) |
813 | #endif | 817 | #endif |
814 | 818 | ||
815 | static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, | 819 | static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, |
816 | irq_hw_number_t hw) | 820 | irq_hw_number_t hw) |
817 | { | 821 | { |
818 | if (hw < 32) { | 822 | if (hw < 32) { |
819 | irq_set_percpu_devid(irq); | 823 | irq_set_percpu_devid(irq); |
820 | irq_set_chip_and_handler(irq, &gic_chip, | 824 | irq_set_chip_and_handler(irq, &gic_chip, |
821 | handle_percpu_devid_irq); | 825 | handle_percpu_devid_irq); |
822 | set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); | 826 | set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); |
823 | } else { | 827 | } else { |
824 | irq_set_chip_and_handler(irq, &gic_chip, | 828 | irq_set_chip_and_handler(irq, &gic_chip, |
825 | handle_fasteoi_irq); | 829 | handle_fasteoi_irq); |
826 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 830 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
827 | 831 | ||
828 | gic_routable_irq_domain_ops->map(d, irq, hw); | 832 | gic_routable_irq_domain_ops->map(d, irq, hw); |
829 | } | 833 | } |
830 | irq_set_chip_data(irq, d->host_data); | 834 | irq_set_chip_data(irq, d->host_data); |
831 | return 0; | 835 | return 0; |
832 | } | 836 | } |
833 | 837 | ||
834 | static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq) | 838 | static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq) |
835 | { | 839 | { |
836 | gic_routable_irq_domain_ops->unmap(d, irq); | 840 | gic_routable_irq_domain_ops->unmap(d, irq); |
837 | } | 841 | } |
838 | 842 | ||
839 | static int gic_irq_domain_xlate(struct irq_domain *d, | 843 | static int gic_irq_domain_xlate(struct irq_domain *d, |
840 | struct device_node *controller, | 844 | struct device_node *controller, |
841 | const u32 *intspec, unsigned int intsize, | 845 | const u32 *intspec, unsigned int intsize, |
842 | unsigned long *out_hwirq, unsigned int *out_type) | 846 | unsigned long *out_hwirq, unsigned int *out_type) |
843 | { | 847 | { |
844 | unsigned long ret = 0; | 848 | unsigned long ret = 0; |
845 | 849 | ||
846 | if (d->of_node != controller) | 850 | if (d->of_node != controller) |
847 | return -EINVAL; | 851 | return -EINVAL; |
848 | if (intsize < 3) | 852 | if (intsize < 3) |
849 | return -EINVAL; | 853 | return -EINVAL; |
850 | 854 | ||
851 | /* Get the interrupt number and add 16 to skip over SGIs */ | 855 | /* Get the interrupt number and add 16 to skip over SGIs */ |
852 | *out_hwirq = intspec[1] + 16; | 856 | *out_hwirq = intspec[1] + 16; |
853 | 857 | ||
854 | /* For SPIs, we need to add 16 more to get the GIC irq ID number */ | 858 | /* For SPIs, we need to add 16 more to get the GIC irq ID number */ |
855 | if (!intspec[0]) { | 859 | if (!intspec[0]) { |
856 | ret = gic_routable_irq_domain_ops->xlate(d, controller, | 860 | ret = gic_routable_irq_domain_ops->xlate(d, controller, |
857 | intspec, | 861 | intspec, |
858 | intsize, | 862 | intsize, |
859 | out_hwirq, | 863 | out_hwirq, |
860 | out_type); | 864 | out_type); |
861 | 865 | ||
862 | if (IS_ERR_VALUE(ret)) | 866 | if (IS_ERR_VALUE(ret)) |
863 | return ret; | 867 | return ret; |
864 | } | 868 | } |
865 | 869 | ||
866 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; | 870 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; |
867 | 871 | ||
868 | return ret; | 872 | return ret; |
869 | } | 873 | } |
870 | 874 | ||
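With the standard three-cell GIC binding the specifier is <type number flags>: cell 0 selects SPI (0) or PPI (1), cell 1 is the number within that class, and cell 2 the trigger type. The xlate above adds 16 to skip SGIs, and the default routable xlate below adds another 16 for SPIs to skip PPIs as well. A sketch of the resulting hwirq, assuming that binding:

	#include <linux/types.h>

	static unsigned long gic_hwirq_from_spec(u32 is_ppi, u32 nr)
	{
		/* <0 120 4> -> SPI 120 -> hwirq 152; <1 13 8> -> PPI 13 -> hwirq 29 */
		return nr + 16 + (is_ppi ? 0 : 16);
	}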
871 | #ifdef CONFIG_SMP | 875 | #ifdef CONFIG_SMP |
872 | static int gic_secondary_init(struct notifier_block *nfb, unsigned long action, | 876 | static int gic_secondary_init(struct notifier_block *nfb, unsigned long action, |
873 | void *hcpu) | 877 | void *hcpu) |
874 | { | 878 | { |
875 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 879 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
876 | gic_cpu_init(&gic_data[0]); | 880 | gic_cpu_init(&gic_data[0]); |
877 | return NOTIFY_OK; | 881 | return NOTIFY_OK; |
878 | } | 882 | } |
879 | 883 | ||
880 | /* | 884 | /* |
881 | * Notifier for enabling the GIC CPU interface. Set an arbitrarily high | 885 | * Notifier for enabling the GIC CPU interface. Set an arbitrarily high |
882 | * priority because the GIC needs to be up before the ARM generic timers. | 886 | * priority because the GIC needs to be up before the ARM generic timers. |
883 | */ | 887 | */ |
884 | static struct notifier_block gic_cpu_notifier = { | 888 | static struct notifier_block gic_cpu_notifier = { |
885 | .notifier_call = gic_secondary_init, | 889 | .notifier_call = gic_secondary_init, |
886 | .priority = 100, | 890 | .priority = 100, |
887 | }; | 891 | }; |
888 | #endif | 892 | #endif |
889 | 893 | ||
890 | static const struct irq_domain_ops gic_irq_domain_ops = { | 894 | static const struct irq_domain_ops gic_irq_domain_ops = { |
891 | .map = gic_irq_domain_map, | 895 | .map = gic_irq_domain_map, |
892 | .unmap = gic_irq_domain_unmap, | 896 | .unmap = gic_irq_domain_unmap, |
893 | .xlate = gic_irq_domain_xlate, | 897 | .xlate = gic_irq_domain_xlate, |
894 | }; | 898 | }; |
895 | 899 | ||
896 | /* Default functions for routable irq domain */ | 900 | /* Default functions for routable irq domain */ |
897 | static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq, | 901 | static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq, |
898 | irq_hw_number_t hw) | 902 | irq_hw_number_t hw) |
899 | { | 903 | { |
900 | return 0; | 904 | return 0; |
901 | } | 905 | } |
902 | 906 | ||
903 | static void gic_routable_irq_domain_unmap(struct irq_domain *d, | 907 | static void gic_routable_irq_domain_unmap(struct irq_domain *d, |
904 | unsigned int irq) | 908 | unsigned int irq) |
905 | { | 909 | { |
906 | } | 910 | } |
907 | 911 | ||
908 | static int gic_routable_irq_domain_xlate(struct irq_domain *d, | 912 | static int gic_routable_irq_domain_xlate(struct irq_domain *d, |
909 | struct device_node *controller, | 913 | struct device_node *controller, |
910 | const u32 *intspec, unsigned int intsize, | 914 | const u32 *intspec, unsigned int intsize, |
911 | unsigned long *out_hwirq, | 915 | unsigned long *out_hwirq, |
912 | unsigned int *out_type) | 916 | unsigned int *out_type) |
913 | { | 917 | { |
914 | *out_hwirq += 16; | 918 | *out_hwirq += 16; |
915 | return 0; | 919 | return 0; |
916 | } | 920 | } |
917 | 921 | ||
918 | const struct irq_domain_ops gic_default_routable_irq_domain_ops = { | 922 | const struct irq_domain_ops gic_default_routable_irq_domain_ops = { |
919 | .map = gic_routable_irq_domain_map, | 923 | .map = gic_routable_irq_domain_map, |
920 | .unmap = gic_routable_irq_domain_unmap, | 924 | .unmap = gic_routable_irq_domain_unmap, |
921 | .xlate = gic_routable_irq_domain_xlate, | 925 | .xlate = gic_routable_irq_domain_xlate, |
922 | }; | 926 | }; |
923 | 927 | ||
924 | const struct irq_domain_ops *gic_routable_irq_domain_ops = | 928 | const struct irq_domain_ops *gic_routable_irq_domain_ops = |
925 | &gic_default_routable_irq_domain_ops; | 929 | &gic_default_routable_irq_domain_ops; |
926 | 930 | ||
927 | void __init gic_init_bases(unsigned int gic_nr, int irq_start, | 931 | void __init gic_init_bases(unsigned int gic_nr, int irq_start, |
928 | void __iomem *dist_base, void __iomem *cpu_base, | 932 | void __iomem *dist_base, void __iomem *cpu_base, |
929 | u32 percpu_offset, struct device_node *node) | 933 | u32 percpu_offset, struct device_node *node) |
930 | { | 934 | { |
931 | irq_hw_number_t hwirq_base; | 935 | irq_hw_number_t hwirq_base; |
932 | struct gic_chip_data *gic; | 936 | struct gic_chip_data *gic; |
933 | int gic_irqs, irq_base, i; | 937 | int gic_irqs, irq_base, i; |
934 | int nr_routable_irqs; | 938 | int nr_routable_irqs; |
935 | 939 | ||
936 | BUG_ON(gic_nr >= MAX_GIC_NR); | 940 | BUG_ON(gic_nr >= MAX_GIC_NR); |
937 | 941 | ||
938 | gic = &gic_data[gic_nr]; | 942 | gic = &gic_data[gic_nr]; |
939 | #ifdef CONFIG_GIC_NON_BANKED | 943 | #ifdef CONFIG_GIC_NON_BANKED |
940 | if (percpu_offset) { /* Franken-GIC without banked registers... */ | 944 | if (percpu_offset) { /* Franken-GIC without banked registers... */ |
941 | unsigned int cpu; | 945 | unsigned int cpu; |
942 | 946 | ||
943 | gic->dist_base.percpu_base = alloc_percpu(void __iomem *); | 947 | gic->dist_base.percpu_base = alloc_percpu(void __iomem *); |
944 | gic->cpu_base.percpu_base = alloc_percpu(void __iomem *); | 948 | gic->cpu_base.percpu_base = alloc_percpu(void __iomem *); |
945 | if (WARN_ON(!gic->dist_base.percpu_base || | 949 | if (WARN_ON(!gic->dist_base.percpu_base || |
946 | !gic->cpu_base.percpu_base)) { | 950 | !gic->cpu_base.percpu_base)) { |
947 | free_percpu(gic->dist_base.percpu_base); | 951 | free_percpu(gic->dist_base.percpu_base); |
948 | free_percpu(gic->cpu_base.percpu_base); | 952 | free_percpu(gic->cpu_base.percpu_base); |
949 | return; | 953 | return; |
950 | } | 954 | } |
951 | 955 | ||
952 | for_each_possible_cpu(cpu) { | 956 | for_each_possible_cpu(cpu) { |
953 | unsigned long offset = percpu_offset * cpu_logical_map(cpu); | 957 | unsigned long offset = percpu_offset * cpu_logical_map(cpu); |
954 | *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; | 958 | *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; |
955 | *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; | 959 | *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; |
956 | } | 960 | } |
957 | 961 | ||
958 | gic_set_base_accessor(gic, gic_get_percpu_base); | 962 | gic_set_base_accessor(gic, gic_get_percpu_base); |
959 | } else | 963 | } else |
960 | #endif | 964 | #endif |
961 | { /* Normal, sane GIC... */ | 965 | { /* Normal, sane GIC... */ |
962 | WARN(percpu_offset, | 966 | WARN(percpu_offset, |
963 | "GIC_NON_BANKED not enabled, ignoring %08x offset!", | 967 | "GIC_NON_BANKED not enabled, ignoring %08x offset!", |
964 | percpu_offset); | 968 | percpu_offset); |
965 | gic->dist_base.common_base = dist_base; | 969 | gic->dist_base.common_base = dist_base; |
966 | gic->cpu_base.common_base = cpu_base; | 970 | gic->cpu_base.common_base = cpu_base; |
967 | gic_set_base_accessor(gic, gic_get_common_base); | 971 | gic_set_base_accessor(gic, gic_get_common_base); |
968 | } | 972 | } |
969 | 973 | ||
970 | /* | 974 | /* |
971 | * Initialize the CPU interface map to all CPUs. | 975 | * Initialize the CPU interface map to all CPUs. |
972 | * It will be refined as each CPU probes its ID. | 976 | * It will be refined as each CPU probes its ID. |
973 | */ | 977 | */ |
974 | for (i = 0; i < NR_GIC_CPU_IF; i++) | 978 | for (i = 0; i < NR_GIC_CPU_IF; i++) |
975 | gic_cpu_map[i] = 0xff; | 979 | gic_cpu_map[i] = 0xff; |
976 | 980 | ||
977 | /* | 981 | /* |
978 | * For primary GICs, skip over SGIs. | 982 | * For primary GICs, skip over SGIs. |
979 | * For secondary GICs, skip over PPIs, too. | 983 | * For secondary GICs, skip over PPIs, too. |
980 | */ | 984 | */ |
981 | if (gic_nr == 0 && (irq_start & 31) > 0) { | 985 | if (gic_nr == 0 && (irq_start & 31) > 0) { |
982 | hwirq_base = 16; | 986 | hwirq_base = 16; |
983 | if (irq_start != -1) | 987 | if (irq_start != -1) |
984 | irq_start = (irq_start & ~31) + 16; | 988 | irq_start = (irq_start & ~31) + 16; |
985 | } else { | 989 | } else { |
986 | hwirq_base = 32; | 990 | hwirq_base = 32; |
987 | } | 991 | } |
988 | 992 | ||
989 | /* | 993 | /* |
990 | * Find out how many interrupts are supported. | 994 | * Find out how many interrupts are supported. |
991 | * The GIC only supports up to 1020 interrupt sources. | 995 | * The GIC only supports up to 1020 interrupt sources. |
992 | */ | 996 | */ |
993 | gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f; | 997 | gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f; |
994 | gic_irqs = (gic_irqs + 1) * 32; | 998 | gic_irqs = (gic_irqs + 1) * 32; |
995 | if (gic_irqs > 1020) | 999 | if (gic_irqs > 1020) |
996 | gic_irqs = 1020; | 1000 | gic_irqs = 1020; |
997 | gic->gic_irqs = gic_irqs; | 1001 | gic->gic_irqs = gic_irqs; |
998 | 1002 | ||
999 | gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ | 1003 | gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ |
1000 | 1004 | ||
1001 | if (of_property_read_u32(node, "arm,routable-irqs", | 1005 | if (of_property_read_u32(node, "arm,routable-irqs", |
1002 | &nr_routable_irqs)) { | 1006 | &nr_routable_irqs)) { |
1003 | irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, | 1007 | irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, |
1004 | numa_node_id()); | 1008 | numa_node_id()); |
1005 | if (IS_ERR_VALUE(irq_base)) { | 1009 | if (IS_ERR_VALUE(irq_base)) { |
1006 | WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", | 1010 | WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", |
1007 | irq_start); | 1011 | irq_start); |
1008 | irq_base = irq_start; | 1012 | irq_base = irq_start; |
1009 | } | 1013 | } |
1010 | 1014 | ||
1011 | gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, | 1015 | gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, |
1012 | hwirq_base, &gic_irq_domain_ops, gic); | 1016 | hwirq_base, &gic_irq_domain_ops, gic); |
1013 | } else { | 1017 | } else { |
1014 | gic->domain = irq_domain_add_linear(node, nr_routable_irqs, | 1018 | gic->domain = irq_domain_add_linear(node, nr_routable_irqs, |
1015 | &gic_irq_domain_ops, | 1019 | &gic_irq_domain_ops, |
1016 | gic); | 1020 | gic); |
1017 | } | 1021 | } |
1018 | 1022 | ||
1019 | if (WARN_ON(!gic->domain)) | 1023 | if (WARN_ON(!gic->domain)) |
1020 | return; | 1024 | return; |
1021 | 1025 | ||
1022 | if (gic_nr == 0) { | 1026 | if (gic_nr == 0) { |
1023 | #ifdef CONFIG_SMP | 1027 | #ifdef CONFIG_SMP |
1024 | set_smp_cross_call(gic_raise_softirq); | 1028 | set_smp_cross_call(gic_raise_softirq); |
1025 | register_cpu_notifier(&gic_cpu_notifier); | 1029 | register_cpu_notifier(&gic_cpu_notifier); |
1026 | #endif | 1030 | #endif |
1027 | set_handle_irq(gic_handle_irq); | 1031 | set_handle_irq(gic_handle_irq); |
1028 | } | 1032 | } |
1029 | 1033 | ||
1030 | gic_chip.flags |= gic_arch_extn.flags; | 1034 | gic_chip.flags |= gic_arch_extn.flags; |
1031 | gic_dist_init(gic); | 1035 | gic_dist_init(gic); |
1032 | gic_cpu_init(gic); | 1036 | gic_cpu_init(gic); |
1033 | gic_pm_init(gic); | 1037 | gic_pm_init(gic); |
1034 | } | 1038 | } |
1035 | 1039 | ||
1036 | #ifdef CONFIG_OF | 1040 | #ifdef CONFIG_OF |
1037 | static int gic_cnt __initdata; | 1041 | static int gic_cnt __initdata; |
1038 | 1042 | ||
1039 | static int __init | 1043 | static int __init |
1040 | gic_of_init(struct device_node *node, struct device_node *parent) | 1044 | gic_of_init(struct device_node *node, struct device_node *parent) |
1041 | { | 1045 | { |
1042 | void __iomem *cpu_base; | 1046 | void __iomem *cpu_base; |
1043 | void __iomem *dist_base; | 1047 | void __iomem *dist_base; |
1044 | u32 percpu_offset; | 1048 | u32 percpu_offset; |
1045 | int irq; | 1049 | int irq; |
1046 | 1050 | ||
1047 | if (WARN_ON(!node)) | 1051 | if (WARN_ON(!node)) |
1048 | return -ENODEV; | 1052 | return -ENODEV; |
1049 | 1053 | ||
1050 | dist_base = of_iomap(node, 0); | 1054 | dist_base = of_iomap(node, 0); |
1051 | WARN(!dist_base, "unable to map gic dist registers\n"); | 1055 | WARN(!dist_base, "unable to map gic dist registers\n"); |
1052 | 1056 | ||
1053 | cpu_base = of_iomap(node, 1); | 1057 | cpu_base = of_iomap(node, 1); |
1054 | WARN(!cpu_base, "unable to map gic cpu registers\n"); | 1058 | WARN(!cpu_base, "unable to map gic cpu registers\n"); |
1055 | 1059 | ||
1056 | if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) | 1060 | if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) |
1057 | percpu_offset = 0; | 1061 | percpu_offset = 0; |
1058 | 1062 | ||
1059 | gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node); | 1063 | gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node); |
1060 | if (!gic_cnt) | 1064 | if (!gic_cnt) |
1061 | gic_init_physaddr(node); | 1065 | gic_init_physaddr(node); |
1062 | 1066 | ||
1063 | if (parent) { | 1067 | if (parent) { |
1064 | irq = irq_of_parse_and_map(node, 0); | 1068 | irq = irq_of_parse_and_map(node, 0); |
1065 | gic_cascade_irq(gic_cnt, irq); | 1069 | gic_cascade_irq(gic_cnt, irq); |
1066 | } | 1070 | } |
1067 | gic_cnt++; | 1071 | gic_cnt++; |
1068 | return 0; | 1072 | return 0; |
1069 | } | 1073 | } |
1070 | IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); | 1074 | IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); |
1071 | IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); | 1075 | IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); |
1072 | IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); | 1076 | IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); |
1073 | IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); | 1077 | IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); |
1074 | 1078 | ||
1075 | #endif | 1079 | #endif |
include/linux/interrupt.h
1 | /* interrupt.h */ | 1 | /* interrupt.h */ |
2 | #ifndef _LINUX_INTERRUPT_H | 2 | #ifndef _LINUX_INTERRUPT_H |
3 | #define _LINUX_INTERRUPT_H | 3 | #define _LINUX_INTERRUPT_H |
4 | 4 | ||
5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
6 | #include <linux/linkage.h> | 6 | #include <linux/linkage.h> |
7 | #include <linux/bitops.h> | 7 | #include <linux/bitops.h> |
8 | #include <linux/preempt.h> | 8 | #include <linux/preempt.h> |
9 | #include <linux/cpumask.h> | 9 | #include <linux/cpumask.h> |
10 | #include <linux/irqreturn.h> | 10 | #include <linux/irqreturn.h> |
11 | #include <linux/irqnr.h> | 11 | #include <linux/irqnr.h> |
12 | #include <linux/hardirq.h> | 12 | #include <linux/hardirq.h> |
13 | #include <linux/irqflags.h> | 13 | #include <linux/irqflags.h> |
14 | #include <linux/hrtimer.h> | 14 | #include <linux/hrtimer.h> |
15 | #include <linux/kref.h> | 15 | #include <linux/kref.h> |
16 | #include <linux/workqueue.h> | 16 | #include <linux/workqueue.h> |
17 | 17 | ||
18 | #include <linux/atomic.h> | 18 | #include <linux/atomic.h> |
19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
20 | #include <asm/irq.h> | 20 | #include <asm/irq.h> |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * These correspond to the IORESOURCE_IRQ_* defines in | 23 | * These correspond to the IORESOURCE_IRQ_* defines in |
24 | * linux/ioport.h to select the interrupt line behaviour. When | 24 | * linux/ioport.h to select the interrupt line behaviour. When |
25 | * requesting an interrupt without specifying a IRQF_TRIGGER, the | 25 | * requesting an interrupt without specifying a IRQF_TRIGGER, the |
26 | * setting should be assumed to be "as already configured", which | 26 | * setting should be assumed to be "as already configured", which |
27 | * may be as per machine or firmware initialisation. | 27 | * may be as per machine or firmware initialisation. |
28 | */ | 28 | */ |
29 | #define IRQF_TRIGGER_NONE 0x00000000 | 29 | #define IRQF_TRIGGER_NONE 0x00000000 |
30 | #define IRQF_TRIGGER_RISING 0x00000001 | 30 | #define IRQF_TRIGGER_RISING 0x00000001 |
31 | #define IRQF_TRIGGER_FALLING 0x00000002 | 31 | #define IRQF_TRIGGER_FALLING 0x00000002 |
32 | #define IRQF_TRIGGER_HIGH 0x00000004 | 32 | #define IRQF_TRIGGER_HIGH 0x00000004 |
33 | #define IRQF_TRIGGER_LOW 0x00000008 | 33 | #define IRQF_TRIGGER_LOW 0x00000008 |
34 | #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ | 34 | #define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \ |
35 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) | 35 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING) |
36 | #define IRQF_TRIGGER_PROBE 0x00000010 | 36 | #define IRQF_TRIGGER_PROBE 0x00000010 |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * These flags are used only by the kernel as part of the | 39 | * These flags are used only by the kernel as part of the |
40 | * irq handling routines. | 40 | * irq handling routines. |
41 | * | 41 | * |
42 | * IRQF_DISABLED - keep irqs disabled when calling the action handler. | 42 | * IRQF_DISABLED - keep irqs disabled when calling the action handler. |
43 | * DEPRECATED. This flag is a NOOP and scheduled to be removed | 43 | * DEPRECATED. This flag is a NOOP and scheduled to be removed |
44 | * IRQF_SHARED - allow sharing the irq among several devices | 44 | * IRQF_SHARED - allow sharing the irq among several devices |
45 | * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur | 45 | * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur |
46 | * IRQF_TIMER - Flag to mark this interrupt as timer interrupt | 46 | * IRQF_TIMER - Flag to mark this interrupt as timer interrupt |
47 | * IRQF_PERCPU - Interrupt is per cpu | 47 | * IRQF_PERCPU - Interrupt is per cpu |
48 | * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing | 48 | * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing |
49 | * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is | 49 | * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is |
50 | * registered first in a shared interrupt is considered for | 50 | * registered first in a shared interrupt is considered for |
51 | * performance reasons) | 51 | * performance reasons) |
52 | * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. | 52 | * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. |
53 | * Used by threaded interrupts which need to keep the | 53 | * Used by threaded interrupts which need to keep the |
54 | * irq line disabled until the threaded handler has been run. | 54 | * irq line disabled until the threaded handler has been run. |
55 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend | 55 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend |
56 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set | 56 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set |
57 | * IRQF_NO_THREAD - Interrupt cannot be threaded | 57 | * IRQF_NO_THREAD - Interrupt cannot be threaded |
58 | * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device | 58 | * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device |
59 | * resume time. | 59 | * resume time. |
60 | */ | 60 | */ |
61 | #define IRQF_DISABLED 0x00000020 | 61 | #define IRQF_DISABLED 0x00000020 |
62 | #define IRQF_SHARED 0x00000080 | 62 | #define IRQF_SHARED 0x00000080 |
63 | #define IRQF_PROBE_SHARED 0x00000100 | 63 | #define IRQF_PROBE_SHARED 0x00000100 |
64 | #define __IRQF_TIMER 0x00000200 | 64 | #define __IRQF_TIMER 0x00000200 |
65 | #define IRQF_PERCPU 0x00000400 | 65 | #define IRQF_PERCPU 0x00000400 |
66 | #define IRQF_NOBALANCING 0x00000800 | 66 | #define IRQF_NOBALANCING 0x00000800 |
67 | #define IRQF_IRQPOLL 0x00001000 | 67 | #define IRQF_IRQPOLL 0x00001000 |
68 | #define IRQF_ONESHOT 0x00002000 | 68 | #define IRQF_ONESHOT 0x00002000 |
69 | #define IRQF_NO_SUSPEND 0x00004000 | 69 | #define IRQF_NO_SUSPEND 0x00004000 |
70 | #define IRQF_FORCE_RESUME 0x00008000 | 70 | #define IRQF_FORCE_RESUME 0x00008000 |
71 | #define IRQF_NO_THREAD 0x00010000 | 71 | #define IRQF_NO_THREAD 0x00010000 |
72 | #define IRQF_EARLY_RESUME 0x00020000 | 72 | #define IRQF_EARLY_RESUME 0x00020000 |
73 | 73 | ||
74 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) | 74 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) |
75 | 75 | ||
76 | /* | 76 | /* |
77 | * These values can be returned by request_any_context_irq() and | 77 | * These values can be returned by request_any_context_irq() and |
78 | * describe the context the interrupt will be run in. | 78 | * describe the context the interrupt will be run in. |
79 | * | 79 | * |
80 | * IRQC_IS_HARDIRQ - interrupt runs in hardirq context | 80 | * IRQC_IS_HARDIRQ - interrupt runs in hardirq context |
81 | * IRQC_IS_NESTED - interrupt runs in a nested threaded context | 81 | * IRQC_IS_NESTED - interrupt runs in a nested threaded context |
82 | */ | 82 | */ |
83 | enum { | 83 | enum { |
84 | IRQC_IS_HARDIRQ = 0, | 84 | IRQC_IS_HARDIRQ = 0, |
85 | IRQC_IS_NESTED, | 85 | IRQC_IS_NESTED, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | typedef irqreturn_t (*irq_handler_t)(int, void *); | 88 | typedef irqreturn_t (*irq_handler_t)(int, void *); |
89 | 89 | ||
90 | /** | 90 | /** |
91 | * struct irqaction - per interrupt action descriptor | 91 | * struct irqaction - per interrupt action descriptor |
92 | * @handler: interrupt handler function | 92 | * @handler: interrupt handler function |
93 | * @name: name of the device | 93 | * @name: name of the device |
94 | * @dev_id: cookie to identify the device | 94 | * @dev_id: cookie to identify the device |
95 | * @percpu_dev_id: per-cpu cookie to identify the device | 95 | * @percpu_dev_id: per-cpu cookie to identify the device |
96 | * @next: pointer to the next irqaction for shared interrupts | 96 | * @next: pointer to the next irqaction for shared interrupts |
97 | * @irq: interrupt number | 97 | * @irq: interrupt number |
98 | * @flags: flags (see IRQF_* above) | 98 | * @flags: flags (see IRQF_* above) |
99 | * @thread_fn: interrupt handler function for threaded interrupts | 99 | * @thread_fn: interrupt handler function for threaded interrupts |
100 | * @thread: thread pointer for threaded interrupts | 100 | * @thread: thread pointer for threaded interrupts |
101 | * @thread_flags: flags related to @thread | 101 | * @thread_flags: flags related to @thread |
102 | * @thread_mask: bitmask for keeping track of @thread activity | 102 | * @thread_mask: bitmask for keeping track of @thread activity |
103 | * @dir: pointer to the proc/irq/NN/name entry | 103 | * @dir: pointer to the proc/irq/NN/name entry |
104 | */ | 104 | */ |
105 | struct irqaction { | 105 | struct irqaction { |
106 | irq_handler_t handler; | 106 | irq_handler_t handler; |
107 | void *dev_id; | 107 | void *dev_id; |
108 | void __percpu *percpu_dev_id; | 108 | void __percpu *percpu_dev_id; |
109 | struct irqaction *next; | 109 | struct irqaction *next; |
110 | irq_handler_t thread_fn; | 110 | irq_handler_t thread_fn; |
111 | struct task_struct *thread; | 111 | struct task_struct *thread; |
112 | unsigned int irq; | 112 | unsigned int irq; |
113 | unsigned int flags; | 113 | unsigned int flags; |
114 | unsigned long thread_flags; | 114 | unsigned long thread_flags; |
115 | unsigned long thread_mask; | 115 | unsigned long thread_mask; |
116 | const char *name; | 116 | const char *name; |
117 | struct proc_dir_entry *dir; | 117 | struct proc_dir_entry *dir; |
118 | } ____cacheline_internodealigned_in_smp; | 118 | } ____cacheline_internodealigned_in_smp; |
119 | 119 | ||
120 | extern irqreturn_t no_action(int cpl, void *dev_id); | 120 | extern irqreturn_t no_action(int cpl, void *dev_id); |
121 | 121 | ||
122 | extern int __must_check | 122 | extern int __must_check |
123 | request_threaded_irq(unsigned int irq, irq_handler_t handler, | 123 | request_threaded_irq(unsigned int irq, irq_handler_t handler, |
124 | irq_handler_t thread_fn, | 124 | irq_handler_t thread_fn, |
125 | unsigned long flags, const char *name, void *dev); | 125 | unsigned long flags, const char *name, void *dev); |
126 | 126 | ||
127 | static inline int __must_check | 127 | static inline int __must_check |
128 | request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, | 128 | request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, |
129 | const char *name, void *dev) | 129 | const char *name, void *dev) |
130 | { | 130 | { |
131 | return request_threaded_irq(irq, handler, NULL, flags, name, dev); | 131 | return request_threaded_irq(irq, handler, NULL, flags, name, dev); |
132 | } | 132 | } |
133 | 133 | ||
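
A quick illustration of the request_threaded_irq()/request_irq() pairing and the IRQF_* flags documented above. This is a hedged sketch, not code from this diff: the "my_dev" name and both handlers are invented, and the IRQF_ONESHOT usage simply mirrors the flag description.

#include <linux/interrupt.h>

/* Hypothetical handlers for an imaginary device "my_dev". */
static irqreturn_t my_hard_handler(int irq, void *dev_id)
{
	/* Hard irq context: acknowledge quickly, defer the real work. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_handler(int irq, void *dev_id)
{
	/* Kernel thread context: may sleep, e.g. for bus transfers. */
	return IRQ_HANDLED;
}

static int my_dev_request(unsigned int irq, void *dev)
{
	/*
	 * IRQF_ONESHOT keeps the line masked until my_thread_handler()
	 * returns, matching the flag description above.
	 */
	return request_threaded_irq(irq, my_hard_handler, my_thread_handler,
				    IRQF_ONESHOT, "my_dev", dev);
}
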
134 | extern int __must_check | 134 | extern int __must_check |
135 | request_any_context_irq(unsigned int irq, irq_handler_t handler, | 135 | request_any_context_irq(unsigned int irq, irq_handler_t handler, |
136 | unsigned long flags, const char *name, void *dev_id); | 136 | unsigned long flags, const char *name, void *dev_id); |
137 | 137 | ||
138 | extern int __must_check | 138 | extern int __must_check |
139 | request_percpu_irq(unsigned int irq, irq_handler_t handler, | 139 | request_percpu_irq(unsigned int irq, irq_handler_t handler, |
140 | const char *devname, void __percpu *percpu_dev_id); | 140 | const char *devname, void __percpu *percpu_dev_id); |
141 | 141 | ||
142 | extern void free_irq(unsigned int, void *); | 142 | extern void free_irq(unsigned int, void *); |
143 | extern void free_percpu_irq(unsigned int, void __percpu *); | 143 | extern void free_percpu_irq(unsigned int, void __percpu *); |
144 | 144 | ||
145 | struct device; | 145 | struct device; |
146 | 146 | ||
147 | extern int __must_check | 147 | extern int __must_check |
148 | devm_request_threaded_irq(struct device *dev, unsigned int irq, | 148 | devm_request_threaded_irq(struct device *dev, unsigned int irq, |
149 | irq_handler_t handler, irq_handler_t thread_fn, | 149 | irq_handler_t handler, irq_handler_t thread_fn, |
150 | unsigned long irqflags, const char *devname, | 150 | unsigned long irqflags, const char *devname, |
151 | void *dev_id); | 151 | void *dev_id); |
152 | 152 | ||
153 | static inline int __must_check | 153 | static inline int __must_check |
154 | devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, | 154 | devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, |
155 | unsigned long irqflags, const char *devname, void *dev_id) | 155 | unsigned long irqflags, const char *devname, void *dev_id) |
156 | { | 156 | { |
157 | return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, | 157 | return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, |
158 | devname, dev_id); | 158 | devname, dev_id); |
159 | } | 159 | } |
160 | 160 | ||
161 | extern int __must_check | 161 | extern int __must_check |
162 | devm_request_any_context_irq(struct device *dev, unsigned int irq, | 162 | devm_request_any_context_irq(struct device *dev, unsigned int irq, |
163 | irq_handler_t handler, unsigned long irqflags, | 163 | irq_handler_t handler, unsigned long irqflags, |
164 | const char *devname, void *dev_id); | 164 | const char *devname, void *dev_id); |
165 | 165 | ||
166 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); | 166 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); |
167 | 167 | ||
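
The devm_* variants tie the interrupt's lifetime to a struct device, so no explicit free_irq() is needed on the error and teardown paths. A minimal sketch under the same assumptions as the earlier example (the probe function and its arguments are invented):

#include <linux/device.h>
#include <linux/interrupt.h>

static int my_probe(struct device *dev, unsigned int irq, void *priv)
{
	/* Released automatically when "dev" is unbound. */
	return devm_request_irq(dev, irq, my_hard_handler, 0,
				dev_name(dev), priv);
}
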
168 | /* | 168 | /* |
169 | * On lockdep we don't want to enable hardirqs in hardirq | 169 | * On lockdep we don't want to enable hardirqs in hardirq |
170 | * context. Use local_irq_enable_in_hardirq() to annotate | 170 | * context. Use local_irq_enable_in_hardirq() to annotate |
171 | * kernel code that has to do this nevertheless (pretty much | 171 | * kernel code that has to do this nevertheless (pretty much |
172 | * the only valid case is for old/broken hardware that is | 172 | * the only valid case is for old/broken hardware that is |
173 | * insanely slow). | 173 | * insanely slow). |
174 | * | 174 | * |
175 | * NOTE: in theory this might break fragile code that relies | 175 | * NOTE: in theory this might break fragile code that relies |
176 | * on hardirq delivery - in practice we don't seem to have such | 176 | * on hardirq delivery - in practice we don't seem to have such |
177 | * places left. So the only effect should be slightly increased | 177 | * places left. So the only effect should be slightly increased |
178 | * irqs-off latencies. | 178 | * irqs-off latencies. |
179 | */ | 179 | */ |
180 | #ifdef CONFIG_LOCKDEP | 180 | #ifdef CONFIG_LOCKDEP |
181 | # define local_irq_enable_in_hardirq() do { } while (0) | 181 | # define local_irq_enable_in_hardirq() do { } while (0) |
182 | #else | 182 | #else |
183 | # define local_irq_enable_in_hardirq() local_irq_enable() | 183 | # define local_irq_enable_in_hardirq() local_irq_enable() |
184 | #endif | 184 | #endif |
185 | 185 | ||
186 | extern void disable_irq_nosync(unsigned int irq); | 186 | extern void disable_irq_nosync(unsigned int irq); |
187 | extern void disable_irq(unsigned int irq); | 187 | extern void disable_irq(unsigned int irq); |
188 | extern void disable_percpu_irq(unsigned int irq); | 188 | extern void disable_percpu_irq(unsigned int irq); |
189 | extern void enable_irq(unsigned int irq); | 189 | extern void enable_irq(unsigned int irq); |
190 | extern void enable_percpu_irq(unsigned int irq, unsigned int type); | 190 | extern void enable_percpu_irq(unsigned int irq, unsigned int type); |
191 | extern void irq_wake_thread(unsigned int irq, void *dev_id); | 191 | extern void irq_wake_thread(unsigned int irq, void *dev_id); |
192 | 192 | ||
193 | /* The following three functions are for the core kernel use only. */ | 193 | /* The following three functions are for the core kernel use only. */ |
194 | extern void suspend_device_irqs(void); | 194 | extern void suspend_device_irqs(void); |
195 | extern void resume_device_irqs(void); | 195 | extern void resume_device_irqs(void); |
196 | #ifdef CONFIG_PM_SLEEP | 196 | #ifdef CONFIG_PM_SLEEP |
197 | extern int check_wakeup_irqs(void); | 197 | extern int check_wakeup_irqs(void); |
198 | #else | 198 | #else |
199 | static inline int check_wakeup_irqs(void) { return 0; } | 199 | static inline int check_wakeup_irqs(void) { return 0; } |
200 | #endif | 200 | #endif |
201 | 201 | ||
202 | #if defined(CONFIG_SMP) | 202 | #if defined(CONFIG_SMP) |
203 | 203 | ||
204 | extern cpumask_var_t irq_default_affinity; | 204 | extern cpumask_var_t irq_default_affinity; |
205 | 205 | ||
206 | extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); | 206 | /* Internal implementation. Use the helpers below */ |
207 | extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, | ||
208 | bool force); | ||
209 | |||
210 | /** | ||
211 | * irq_set_affinity - Set the irq affinity of a given irq | ||
212 | * @irq: Interrupt to set affinity | ||
213 | * @cpumask: cpumask of target CPUs | ||
214 | * | ||
215 | * Fails if cpumask does not contain an online CPU | ||
216 | */ | ||
217 | static inline int | ||
218 | irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | ||
219 | { | ||
220 | return __irq_set_affinity(irq, cpumask, false); | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * irq_force_affinity - Force the irq affinity of a given irq | ||
225 | * @irq: Interrupt to set affinity | ||
226 | * @cpumask: cpumask of target CPUs | ||
227 | * | ||
228 | * Same as irq_set_affinity, but without checking the mask against | ||
229 | * online cpus. | ||
230 | * | ||
231 | * Solely for low-level cpu hotplug code, where we need to make | ||
232 | * per-cpu interrupts affine before the cpu becomes online. | ||
233 | */ | ||
234 | static inline int | ||
235 | irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) | ||
236 | { | ||
237 | return __irq_set_affinity(irq, cpumask, true); | ||
238 | } | ||
239 | |||
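
The forced variant added above exists for exactly one situation: CPU bringup code that must point a per-cpu interrupt at a CPU which is not yet in cpu_online_mask, where plain irq_set_affinity() returns an error. A minimal sketch of that pattern; the setup function is an assumption, not code from this commit:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Runs while @cpu is still coming online. */
static void my_local_timer_setup(unsigned int irq, unsigned int cpu)
{
	/*
	 * irq_set_affinity(irq, cpumask_of(cpu)) would fail here since
	 * @cpu is not online yet; the forced variant skips the online
	 * check and hands the mask straight to the irq chip.
	 */
	irq_force_affinity(irq, cpumask_of(cpu));
}
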
207 | extern int irq_can_set_affinity(unsigned int irq); | 240 | extern int irq_can_set_affinity(unsigned int irq); |
208 | extern int irq_select_affinity(unsigned int irq); | 241 | extern int irq_select_affinity(unsigned int irq); |
209 | 242 | ||
210 | extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); | 243 | extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); |
211 | 244 | ||
212 | /** | 245 | /** |
213 | * struct irq_affinity_notify - context for notification of IRQ affinity changes | 246 | * struct irq_affinity_notify - context for notification of IRQ affinity changes |
214 | * @irq: Interrupt to which notification applies | 247 | * @irq: Interrupt to which notification applies |
215 | * @kref: Reference count, for internal use | 248 | * @kref: Reference count, for internal use |
216 | * @work: Work item, for internal use | 249 | * @work: Work item, for internal use |
217 | * @notify: Function to be called on change. This will be | 250 | * @notify: Function to be called on change. This will be |
218 | * called in process context. | 251 | * called in process context. |
219 | * @release: Function to be called on release. This will be | 252 | * @release: Function to be called on release. This will be |
220 | * called in process context. Once registered, the | 253 | * called in process context. Once registered, the |
221 | * structure must only be freed when this function is | 254 | * structure must only be freed when this function is |
222 | * called or later. | 255 | * called or later. |
223 | */ | 256 | */ |
224 | struct irq_affinity_notify { | 257 | struct irq_affinity_notify { |
225 | unsigned int irq; | 258 | unsigned int irq; |
226 | struct kref kref; | 259 | struct kref kref; |
227 | struct work_struct work; | 260 | struct work_struct work; |
228 | void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); | 261 | void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); |
229 | void (*release)(struct kref *ref); | 262 | void (*release)(struct kref *ref); |
230 | }; | 263 | }; |
231 | 264 | ||
232 | extern int | 265 | extern int |
233 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); | 266 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); |
234 | 267 | ||
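
A sketch of how the notifier block above is typically wired up; both callbacks and the static notifier instance are hypothetical:

#include <linux/interrupt.h>
#include <linux/kernel.h>

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	/* Process context: e.g. re-target per-cpu buffers to @mask. */
}

static void my_affinity_release(struct kref *ref)
{
	/*
	 * Last reference dropped. A dynamically allocated notifier would
	 * kfree(container_of(ref, struct irq_affinity_notify, kref));
	 * this one is static, so there is nothing to free.
	 */
}

static struct irq_affinity_notify my_notify = {
	.notify = my_affinity_notify,
	.release = my_affinity_release,
};

/* Registration: irq_set_affinity_notifier(irq, &my_notify); */
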
235 | #else /* CONFIG_SMP */ | 268 | #else /* CONFIG_SMP */ |
236 | 269 | ||
237 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) | 270 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) |
238 | { | 271 | { |
239 | return -EINVAL; | 272 | return -EINVAL; |
240 | } | 273 | } |
241 | 274 | ||
242 | static inline int irq_can_set_affinity(unsigned int irq) | 275 | static inline int irq_can_set_affinity(unsigned int irq) |
243 | { | 276 | { |
244 | return 0; | 277 | return 0; |
245 | } | 278 | } |
246 | 279 | ||
247 | static inline int irq_select_affinity(unsigned int irq) { return 0; } | 280 | static inline int irq_select_affinity(unsigned int irq) { return 0; } |
248 | 281 | ||
249 | static inline int irq_set_affinity_hint(unsigned int irq, | 282 | static inline int irq_set_affinity_hint(unsigned int irq, |
250 | const struct cpumask *m) | 283 | const struct cpumask *m) |
251 | { | 284 | { |
252 | return -EINVAL; | 285 | return -EINVAL; |
253 | } | 286 | } |
254 | #endif /* CONFIG_SMP */ | 287 | #endif /* CONFIG_SMP */ |
255 | 288 | ||
256 | /* | 289 | /* |
257 | * Special lockdep variants of irq disabling/enabling. | 290 | * Special lockdep variants of irq disabling/enabling. |
258 | * These should be used for locking constructs where a | 291 | * These should be used for locking constructs where a |
259 | * particular irq context is known to be disabled and is | 292 | * particular irq context is known to be disabled and is |
260 | * the only irq-context user of a lock, so that it is | 293 | * the only irq-context user of a lock, so that it is |
261 | * safe to take the lock in the irq-disabled section | 294 | * safe to take the lock in the irq-disabled section |
262 | * without disabling hardirqs. | 295 | * without disabling hardirqs. |
263 | * | 296 | * |
264 | * On !CONFIG_LOCKDEP they are equivalent to the normal | 297 | * On !CONFIG_LOCKDEP they are equivalent to the normal |
265 | * irq disable/enable methods. | 298 | * irq disable/enable methods. |
266 | */ | 299 | */ |
267 | static inline void disable_irq_nosync_lockdep(unsigned int irq) | 300 | static inline void disable_irq_nosync_lockdep(unsigned int irq) |
268 | { | 301 | { |
269 | disable_irq_nosync(irq); | 302 | disable_irq_nosync(irq); |
270 | #ifdef CONFIG_LOCKDEP | 303 | #ifdef CONFIG_LOCKDEP |
271 | local_irq_disable(); | 304 | local_irq_disable(); |
272 | #endif | 305 | #endif |
273 | } | 306 | } |
274 | 307 | ||
275 | static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) | 308 | static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) |
276 | { | 309 | { |
277 | disable_irq_nosync(irq); | 310 | disable_irq_nosync(irq); |
278 | #ifdef CONFIG_LOCKDEP | 311 | #ifdef CONFIG_LOCKDEP |
279 | local_irq_save(*flags); | 312 | local_irq_save(*flags); |
280 | #endif | 313 | #endif |
281 | } | 314 | } |
282 | 315 | ||
283 | static inline void disable_irq_lockdep(unsigned int irq) | 316 | static inline void disable_irq_lockdep(unsigned int irq) |
284 | { | 317 | { |
285 | disable_irq(irq); | 318 | disable_irq(irq); |
286 | #ifdef CONFIG_LOCKDEP | 319 | #ifdef CONFIG_LOCKDEP |
287 | local_irq_disable(); | 320 | local_irq_disable(); |
288 | #endif | 321 | #endif |
289 | } | 322 | } |
290 | 323 | ||
291 | static inline void enable_irq_lockdep(unsigned int irq) | 324 | static inline void enable_irq_lockdep(unsigned int irq) |
292 | { | 325 | { |
293 | #ifdef CONFIG_LOCKDEP | 326 | #ifdef CONFIG_LOCKDEP |
294 | local_irq_enable(); | 327 | local_irq_enable(); |
295 | #endif | 328 | #endif |
296 | enable_irq(irq); | 329 | enable_irq(irq); |
297 | } | 330 | } |
298 | 331 | ||
299 | static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) | 332 | static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) |
300 | { | 333 | { |
301 | #ifdef CONFIG_LOCKDEP | 334 | #ifdef CONFIG_LOCKDEP |
302 | local_irq_restore(*flags); | 335 | local_irq_restore(*flags); |
303 | #endif | 336 | #endif |
304 | enable_irq(irq); | 337 | enable_irq(irq); |
305 | } | 338 | } |
306 | 339 | ||
307 | /* IRQ wakeup (PM) control: */ | 340 | /* IRQ wakeup (PM) control: */ |
308 | extern int irq_set_irq_wake(unsigned int irq, unsigned int on); | 341 | extern int irq_set_irq_wake(unsigned int irq, unsigned int on); |
309 | 342 | ||
310 | static inline int enable_irq_wake(unsigned int irq) | 343 | static inline int enable_irq_wake(unsigned int irq) |
311 | { | 344 | { |
312 | return irq_set_irq_wake(irq, 1); | 345 | return irq_set_irq_wake(irq, 1); |
313 | } | 346 | } |
314 | 347 | ||
315 | static inline int disable_irq_wake(unsigned int irq) | 348 | static inline int disable_irq_wake(unsigned int irq) |
316 | { | 349 | { |
317 | return irq_set_irq_wake(irq, 0); | 350 | return irq_set_irq_wake(irq, 0); |
318 | } | 351 | } |
319 | 352 | ||
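
A sketch of the usual wakeup pattern built on these helpers; the chip structure, both callbacks and the device_may_wakeup() check are illustrative assumptions:

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct my_chip {
	struct device *dev;
	int irq;
};

static int my_suspend(struct my_chip *chip)
{
	/* Keep the irq armed across suspend so it can wake the system. */
	if (device_may_wakeup(chip->dev))
		enable_irq_wake(chip->irq);
	return 0;
}

static int my_resume(struct my_chip *chip)
{
	if (device_may_wakeup(chip->dev))
		disable_irq_wake(chip->irq);
	return 0;
}
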
320 | 353 | ||
321 | #ifdef CONFIG_IRQ_FORCED_THREADING | 354 | #ifdef CONFIG_IRQ_FORCED_THREADING |
322 | extern bool force_irqthreads; | 355 | extern bool force_irqthreads; |
323 | #else | 356 | #else |
324 | #define force_irqthreads (0) | 357 | #define force_irqthreads (0) |
325 | #endif | 358 | #endif |
326 | 359 | ||
327 | #ifndef __ARCH_SET_SOFTIRQ_PENDING | 360 | #ifndef __ARCH_SET_SOFTIRQ_PENDING |
328 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) | 361 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) |
329 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) | 362 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) |
330 | #endif | 363 | #endif |
331 | 364 | ||
332 | /* Some architectures might implement lazy enabling/disabling of | 365 | /* Some architectures might implement lazy enabling/disabling of |
333 | * interrupts. In some cases, such as stop_machine, we might want | 366 | * interrupts. In some cases, such as stop_machine, we might want |
334 | * to ensure that after a local_irq_disable(), interrupts have | 367 | * to ensure that after a local_irq_disable(), interrupts have |
335 | * really been disabled in hardware. Such architectures need to | 368 | * really been disabled in hardware. Such architectures need to |
336 | * implement the following hook. | 369 | * implement the following hook. |
337 | */ | 370 | */ |
338 | #ifndef hard_irq_disable | 371 | #ifndef hard_irq_disable |
339 | #define hard_irq_disable() do { } while(0) | 372 | #define hard_irq_disable() do { } while(0) |
340 | #endif | 373 | #endif |
341 | 374 | ||
342 | /* PLEASE avoid allocating new softirqs unless you really need high | 375 | /* PLEASE avoid allocating new softirqs unless you really need high |
343 | frequency threaded job scheduling. For almost all purposes | 376 | frequency threaded job scheduling. For almost all purposes |
344 | tasklets are more than enough. E.g. all serial device BHs et | 377 | tasklets are more than enough. E.g. all serial device BHs et |
345 | al. should be converted to tasklets, not softirqs. | 378 | al. should be converted to tasklets, not softirqs. |
346 | */ | 379 | */ |
347 | 380 | ||
348 | enum | 381 | enum |
349 | { | 382 | { |
350 | HI_SOFTIRQ=0, | 383 | HI_SOFTIRQ=0, |
351 | TIMER_SOFTIRQ, | 384 | TIMER_SOFTIRQ, |
352 | NET_TX_SOFTIRQ, | 385 | NET_TX_SOFTIRQ, |
353 | NET_RX_SOFTIRQ, | 386 | NET_RX_SOFTIRQ, |
354 | BLOCK_SOFTIRQ, | 387 | BLOCK_SOFTIRQ, |
355 | BLOCK_IOPOLL_SOFTIRQ, | 388 | BLOCK_IOPOLL_SOFTIRQ, |
356 | TASKLET_SOFTIRQ, | 389 | TASKLET_SOFTIRQ, |
357 | SCHED_SOFTIRQ, | 390 | SCHED_SOFTIRQ, |
358 | HRTIMER_SOFTIRQ, | 391 | HRTIMER_SOFTIRQ, |
359 | RCU_SOFTIRQ, /* Preferably, RCU should always be the last softirq */ | 392 | RCU_SOFTIRQ, /* Preferably, RCU should always be the last softirq */ |
360 | 393 | ||
361 | NR_SOFTIRQS | 394 | NR_SOFTIRQS |
362 | }; | 395 | }; |
363 | 396 | ||
364 | #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) | 397 | #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) |
365 | 398 | ||
366 | /* Map softirq index to softirq name. Update 'softirq_to_name' in | 399 | /* Map softirq index to softirq name. Update 'softirq_to_name' in |
367 | * kernel/softirq.c when adding a new softirq. | 400 | * kernel/softirq.c when adding a new softirq. |
368 | */ | 401 | */ |
369 | extern const char * const softirq_to_name[NR_SOFTIRQS]; | 402 | extern const char * const softirq_to_name[NR_SOFTIRQS]; |
370 | 403 | ||
371 | /* softirq mask and active fields moved to irq_cpustat_t in | 404 | /* softirq mask and active fields moved to irq_cpustat_t in |
372 | * asm/hardirq.h to get better cache usage. KAO | 405 | * asm/hardirq.h to get better cache usage. KAO |
373 | */ | 406 | */ |
374 | 407 | ||
375 | struct softirq_action | 408 | struct softirq_action |
376 | { | 409 | { |
377 | void (*action)(struct softirq_action *); | 410 | void (*action)(struct softirq_action *); |
378 | }; | 411 | }; |
379 | 412 | ||
380 | asmlinkage void do_softirq(void); | 413 | asmlinkage void do_softirq(void); |
381 | asmlinkage void __do_softirq(void); | 414 | asmlinkage void __do_softirq(void); |
382 | 415 | ||
383 | #ifdef __ARCH_HAS_DO_SOFTIRQ | 416 | #ifdef __ARCH_HAS_DO_SOFTIRQ |
384 | void do_softirq_own_stack(void); | 417 | void do_softirq_own_stack(void); |
385 | #else | 418 | #else |
386 | static inline void do_softirq_own_stack(void) | 419 | static inline void do_softirq_own_stack(void) |
387 | { | 420 | { |
388 | __do_softirq(); | 421 | __do_softirq(); |
389 | } | 422 | } |
390 | #endif | 423 | #endif |
391 | 424 | ||
392 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); | 425 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); |
393 | extern void softirq_init(void); | 426 | extern void softirq_init(void); |
394 | extern void __raise_softirq_irqoff(unsigned int nr); | 427 | extern void __raise_softirq_irqoff(unsigned int nr); |
395 | 428 | ||
396 | extern void raise_softirq_irqoff(unsigned int nr); | 429 | extern void raise_softirq_irqoff(unsigned int nr); |
397 | extern void raise_softirq(unsigned int nr); | 430 | extern void raise_softirq(unsigned int nr); |
398 | 431 | ||
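
Purely for illustration of the open/raise pairing (new softirqs are discouraged, per the comment above), a hedged sketch. MY_SOFTIRQ is a placeholder: real code would add its own entry to the enum above rather than reuse an index.

#include <linux/interrupt.h>

/* Placeholder index only so the sketch builds; see caveat above. */
#define MY_SOFTIRQ HRTIMER_SOFTIRQ

static void my_softirq_action(struct softirq_action *a)
{
	/* Softirq context: interrupts enabled, must not sleep. */
}

static void my_init(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action);
}

/* With interrupts already off: raise_softirq_irqoff(MY_SOFTIRQ); */
/* Otherwise:                   raise_softirq(MY_SOFTIRQ);        */
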
399 | DECLARE_PER_CPU(struct task_struct *, ksoftirqd); | 432 | DECLARE_PER_CPU(struct task_struct *, ksoftirqd); |
400 | 433 | ||
401 | static inline struct task_struct *this_cpu_ksoftirqd(void) | 434 | static inline struct task_struct *this_cpu_ksoftirqd(void) |
402 | { | 435 | { |
403 | return this_cpu_read(ksoftirqd); | 436 | return this_cpu_read(ksoftirqd); |
404 | } | 437 | } |
405 | 438 | ||
406 | /* Tasklets --- multithreaded analogue of BHs. | 439 | /* Tasklets --- multithreaded analogue of BHs. |
407 | 440 | ||
408 | The main feature distinguishing them from generic softirqs: a | 441 | The main feature distinguishing them from generic softirqs: a |
409 | tasklet runs on only one CPU at a time. | 442 | tasklet runs on only one CPU at a time. |
410 | 443 | ||
411 | The main feature distinguishing them from BHs: different tasklets | 444 | The main feature distinguishing them from BHs: different tasklets |
412 | may run simultaneously on different CPUs. | 445 | may run simultaneously on different CPUs. |
413 | 446 | ||
414 | Properties: | 447 | Properties: |
415 | * If tasklet_schedule() is called, then the tasklet is guaranteed | 448 | * If tasklet_schedule() is called, then the tasklet is guaranteed |
416 | to be executed on some cpu at least once after this. | 449 | to be executed on some cpu at least once after this. |
417 | * If the tasklet is already scheduled, but its execution has not | 450 | * If the tasklet is already scheduled, but its execution has not |
418 | started yet, it will be executed only once. | 451 | started yet, it will be executed only once. |
419 | * If this tasklet is already running on another CPU (or schedule is called | 452 | * If this tasklet is already running on another CPU (or schedule is called |
420 | from the tasklet itself), it is rescheduled for later. | 453 | from the tasklet itself), it is rescheduled for later. |
421 | * A tasklet is strictly serialized wrt itself, but not | 454 | * A tasklet is strictly serialized wrt itself, but not |
422 | wrt other tasklets. If a client needs intertask | 455 | wrt other tasklets. If a client needs intertask |
423 | synchronization, it must provide it with spinlocks. | 456 | synchronization, it must provide it with spinlocks. |
424 | */ | 457 | */ |
425 | 458 | ||
426 | struct tasklet_struct | 459 | struct tasklet_struct |
427 | { | 460 | { |
428 | struct tasklet_struct *next; | 461 | struct tasklet_struct *next; |
429 | unsigned long state; | 462 | unsigned long state; |
430 | atomic_t count; | 463 | atomic_t count; |
431 | void (*func)(unsigned long); | 464 | void (*func)(unsigned long); |
432 | unsigned long data; | 465 | unsigned long data; |
433 | }; | 466 | }; |
434 | 467 | ||
435 | #define DECLARE_TASKLET(name, func, data) \ | 468 | #define DECLARE_TASKLET(name, func, data) \ |
436 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } | 469 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } |
437 | 470 | ||
438 | #define DECLARE_TASKLET_DISABLED(name, func, data) \ | 471 | #define DECLARE_TASKLET_DISABLED(name, func, data) \ |
439 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } | 472 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } |
440 | 473 | ||
441 | 474 | ||
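
A minimal use of the declaration macros and tasklet_schedule(), matching the properties listed in the comment above; all names are invented:

#include <linux/interrupt.h>

static void my_tasklet_fn(unsigned long data)
{
	/* Softirq context; serialized against itself across CPUs. */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* Defer the bulk of the work out of hard irq context. */
	tasklet_schedule(&my_tasklet);
	return IRQ_HANDLED;
}
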
442 | enum | 475 | enum |
443 | { | 476 | { |
444 | TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ | 477 | TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ |
445 | TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ | 478 | TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ |
446 | }; | 479 | }; |
447 | 480 | ||
448 | #ifdef CONFIG_SMP | 481 | #ifdef CONFIG_SMP |
449 | static inline int tasklet_trylock(struct tasklet_struct *t) | 482 | static inline int tasklet_trylock(struct tasklet_struct *t) |
450 | { | 483 | { |
451 | return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); | 484 | return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); |
452 | } | 485 | } |
453 | 486 | ||
454 | static inline void tasklet_unlock(struct tasklet_struct *t) | 487 | static inline void tasklet_unlock(struct tasklet_struct *t) |
455 | { | 488 | { |
456 | smp_mb__before_clear_bit(); | 489 | smp_mb__before_clear_bit(); |
457 | clear_bit(TASKLET_STATE_RUN, &(t)->state); | 490 | clear_bit(TASKLET_STATE_RUN, &(t)->state); |
458 | } | 491 | } |
459 | 492 | ||
460 | static inline void tasklet_unlock_wait(struct tasklet_struct *t) | 493 | static inline void tasklet_unlock_wait(struct tasklet_struct *t) |
461 | { | 494 | { |
462 | while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } | 495 | while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } |
463 | } | 496 | } |
464 | #else | 497 | #else |
465 | #define tasklet_trylock(t) 1 | 498 | #define tasklet_trylock(t) 1 |
466 | #define tasklet_unlock_wait(t) do { } while (0) | 499 | #define tasklet_unlock_wait(t) do { } while (0) |
467 | #define tasklet_unlock(t) do { } while (0) | 500 | #define tasklet_unlock(t) do { } while (0) |
468 | #endif | 501 | #endif |
469 | 502 | ||
470 | extern void __tasklet_schedule(struct tasklet_struct *t); | 503 | extern void __tasklet_schedule(struct tasklet_struct *t); |
471 | 504 | ||
472 | static inline void tasklet_schedule(struct tasklet_struct *t) | 505 | static inline void tasklet_schedule(struct tasklet_struct *t) |
473 | { | 506 | { |
474 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 507 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
475 | __tasklet_schedule(t); | 508 | __tasklet_schedule(t); |
476 | } | 509 | } |
477 | 510 | ||
478 | extern void __tasklet_hi_schedule(struct tasklet_struct *t); | 511 | extern void __tasklet_hi_schedule(struct tasklet_struct *t); |
479 | 512 | ||
480 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) | 513 | static inline void tasklet_hi_schedule(struct tasklet_struct *t) |
481 | { | 514 | { |
482 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 515 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
483 | __tasklet_hi_schedule(t); | 516 | __tasklet_hi_schedule(t); |
484 | } | 517 | } |
485 | 518 | ||
486 | extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); | 519 | extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); |
487 | 520 | ||
488 | /* | 521 | /* |
489 | * This version avoids touching any other tasklets. Needed for kmemcheck | 522 | * This version avoids touching any other tasklets. Needed for kmemcheck |
490 | * in order not to take any page faults while enqueueing this tasklet; | 523 | * in order not to take any page faults while enqueueing this tasklet; |
491 | * consider VERY carefully whether you really need this or | 524 | * consider VERY carefully whether you really need this or |
492 | * tasklet_hi_schedule()... | 525 | * tasklet_hi_schedule()... |
493 | */ | 526 | */ |
494 | static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) | 527 | static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) |
495 | { | 528 | { |
496 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 529 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
497 | __tasklet_hi_schedule_first(t); | 530 | __tasklet_hi_schedule_first(t); |
498 | } | 531 | } |
499 | 532 | ||
500 | 533 | ||
501 | static inline void tasklet_disable_nosync(struct tasklet_struct *t) | 534 | static inline void tasklet_disable_nosync(struct tasklet_struct *t) |
502 | { | 535 | { |
503 | atomic_inc(&t->count); | 536 | atomic_inc(&t->count); |
504 | smp_mb__after_atomic_inc(); | 537 | smp_mb__after_atomic_inc(); |
505 | } | 538 | } |
506 | 539 | ||
507 | static inline void tasklet_disable(struct tasklet_struct *t) | 540 | static inline void tasklet_disable(struct tasklet_struct *t) |
508 | { | 541 | { |
509 | tasklet_disable_nosync(t); | 542 | tasklet_disable_nosync(t); |
510 | tasklet_unlock_wait(t); | 543 | tasklet_unlock_wait(t); |
511 | smp_mb(); | 544 | smp_mb(); |
512 | } | 545 | } |
513 | 546 | ||
514 | static inline void tasklet_enable(struct tasklet_struct *t) | 547 | static inline void tasklet_enable(struct tasklet_struct *t) |
515 | { | 548 | { |
516 | smp_mb__before_atomic_dec(); | 549 | smp_mb__before_atomic_dec(); |
517 | atomic_dec(&t->count); | 550 | atomic_dec(&t->count); |
518 | } | 551 | } |
519 | 552 | ||
520 | static inline void tasklet_hi_enable(struct tasklet_struct *t) | 553 | static inline void tasklet_hi_enable(struct tasklet_struct *t) |
521 | { | 554 | { |
522 | smp_mb__before_atomic_dec(); | 555 | smp_mb__before_atomic_dec(); |
523 | atomic_dec(&t->count); | 556 | atomic_dec(&t->count); |
524 | } | 557 | } |
525 | 558 | ||
526 | extern void tasklet_kill(struct tasklet_struct *t); | 559 | extern void tasklet_kill(struct tasklet_struct *t); |
527 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); | 560 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); |
528 | extern void tasklet_init(struct tasklet_struct *t, | 561 | extern void tasklet_init(struct tasklet_struct *t, |
529 | void (*func)(unsigned long), unsigned long data); | 562 | void (*func)(unsigned long), unsigned long data); |
530 | 563 | ||
531 | struct tasklet_hrtimer { | 564 | struct tasklet_hrtimer { |
532 | struct hrtimer timer; | 565 | struct hrtimer timer; |
533 | struct tasklet_struct tasklet; | 566 | struct tasklet_struct tasklet; |
534 | enum hrtimer_restart (*function)(struct hrtimer *); | 567 | enum hrtimer_restart (*function)(struct hrtimer *); |
535 | }; | 568 | }; |
536 | 569 | ||
537 | extern void | 570 | extern void |
538 | tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, | 571 | tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, |
539 | enum hrtimer_restart (*function)(struct hrtimer *), | 572 | enum hrtimer_restart (*function)(struct hrtimer *), |
540 | clockid_t which_clock, enum hrtimer_mode mode); | 573 | clockid_t which_clock, enum hrtimer_mode mode); |
541 | 574 | ||
542 | static inline | 575 | static inline |
543 | int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, | 576 | int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, |
544 | const enum hrtimer_mode mode) | 577 | const enum hrtimer_mode mode) |
545 | { | 578 | { |
546 | return hrtimer_start(&ttimer->timer, time, mode); | 579 | return hrtimer_start(&ttimer->timer, time, mode); |
547 | } | 580 | } |
548 | 581 | ||
549 | static inline | 582 | static inline |
550 | void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) | 583 | void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) |
551 | { | 584 | { |
552 | hrtimer_cancel(&ttimer->timer); | 585 | hrtimer_cancel(&ttimer->timer); |
553 | tasklet_kill(&ttimer->tasklet); | 586 | tasklet_kill(&ttimer->tasklet); |
554 | } | 587 | } |
555 | 588 | ||
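
The tasklet_hrtimer combination above fires an hrtimer whose payload then runs from a tasklet, i.e. in softirq context. A hedged sketch of init/start/cancel; the 10ms period and the callback are invented:

#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer my_ttimer;

static enum hrtimer_restart my_ttimer_fn(struct hrtimer *t)
{
	/* Runs from the tasklet, not from hard irq context. */
	return HRTIMER_NORESTART;
}

static void my_start(void)
{
	tasklet_hrtimer_init(&my_ttimer, my_ttimer_fn,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&my_ttimer, ms_to_ktime(10),
			      HRTIMER_MODE_REL);
}

static void my_stop(void)
{
	/* Cancels the timer and kills the backing tasklet. */
	tasklet_hrtimer_cancel(&my_ttimer);
}
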
556 | /* | 589 | /* |
557 | * Autoprobing for irqs: | 590 | * Autoprobing for irqs: |
558 | * | 591 | * |
559 | * probe_irq_on() and probe_irq_off() provide robust primitives | 592 | * probe_irq_on() and probe_irq_off() provide robust primitives |
560 | * for accurate IRQ probing during kernel initialization. They are | 593 | * for accurate IRQ probing during kernel initialization. They are |
561 | * reasonably simple to use, are not "fooled" by spurious interrupts, | 594 | * reasonably simple to use, are not "fooled" by spurious interrupts, |
562 | * and, unlike other attempts at IRQ probing, they do not get hung on | 595 | * and, unlike other attempts at IRQ probing, they do not get hung on |
563 | * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards). | 596 | * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards). |
564 | * | 597 | * |
565 | * For reasonably foolproof probing, use them as follows: | 598 | * For reasonably foolproof probing, use them as follows: |
566 | * | 599 | * |
567 | * 1. clear and/or mask the device's internal interrupt. | 600 | * 1. clear and/or mask the device's internal interrupt. |
568 | * 2. sti(); | 601 | * 2. sti(); |
569 | * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs | 602 | * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs |
570 | * 4. enable the device and cause it to trigger an interrupt. | 603 | * 4. enable the device and cause it to trigger an interrupt. |
571 | * 5. wait for the device to interrupt, using non-intrusive polling or a delay. | 604 | * 5. wait for the device to interrupt, using non-intrusive polling or a delay. |
572 | * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple | 605 | * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple |
573 | * 7. service the device to clear its pending interrupt. | 606 | * 7. service the device to clear its pending interrupt. |
574 | * 8. loop again if paranoia is required. | 607 | * 8. loop again if paranoia is required. |
575 | * | 608 | * |
576 | * probe_irq_on() returns a mask of allocated irqs. | 609 | * probe_irq_on() returns a mask of allocated irqs. |
577 | * | 610 | * |
578 | * probe_irq_off() takes the mask as a parameter, | 611 | * probe_irq_off() takes the mask as a parameter, |
579 | * and returns the irq number which occurred, | 612 | * and returns the irq number which occurred, |
580 | * or zero if none occurred, or a negative irq number | 613 | * or zero if none occurred, or a negative irq number |
581 | * if more than one irq occurred. | 614 | * if more than one irq occurred. |
582 | */ | 615 | */ |
583 | 616 | ||
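
The numbered recipe above, folded into one compact sketch; the my_device_*() helpers and the 20ms delay are placeholders for device-specific operations:

#include <linux/delay.h>
#include <linux/interrupt.h>

static int my_probe_irq(void)
{
	unsigned long irqs;
	int irq;

	my_device_mask_irq();		/* 1. quiesce the device      */
	irqs = probe_irq_on();		/* 3. take over idle IRQs     */
	my_device_trigger_irq();	/* 4. provoke an interrupt    */
	msleep(20);			/* 5. wait for it to arrive   */
	irq = probe_irq_off(irqs);	/* 6. 0 = none, <0 = several  */
	my_device_ack_irq();		/* 7. clear the pending irq   */

	return irq;
}
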
584 | #if !defined(CONFIG_GENERIC_IRQ_PROBE) | 617 | #if !defined(CONFIG_GENERIC_IRQ_PROBE) |
585 | static inline unsigned long probe_irq_on(void) | 618 | static inline unsigned long probe_irq_on(void) |
586 | { | 619 | { |
587 | return 0; | 620 | return 0; |
588 | } | 621 | } |
589 | static inline int probe_irq_off(unsigned long val) | 622 | static inline int probe_irq_off(unsigned long val) |
590 | { | 623 | { |
591 | return 0; | 624 | return 0; |
592 | } | 625 | } |
593 | static inline unsigned int probe_irq_mask(unsigned long val) | 626 | static inline unsigned int probe_irq_mask(unsigned long val) |
594 | { | 627 | { |
595 | return 0; | 628 | return 0; |
596 | } | 629 | } |
597 | #else | 630 | #else |
598 | extern unsigned long probe_irq_on(void); /* returns 0 on failure */ | 631 | extern unsigned long probe_irq_on(void); /* returns 0 on failure */ |
599 | extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */ | 632 | extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */ |
600 | extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */ | 633 | extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */ |
601 | #endif | 634 | #endif |
602 | 635 | ||
603 | #ifdef CONFIG_PROC_FS | 636 | #ifdef CONFIG_PROC_FS |
604 | /* Initialize /proc/irq/ */ | 637 | /* Initialize /proc/irq/ */ |
605 | extern void init_irq_proc(void); | 638 | extern void init_irq_proc(void); |
606 | #else | 639 | #else |
607 | static inline void init_irq_proc(void) | 640 | static inline void init_irq_proc(void) |
608 | { | 641 | { |
609 | } | 642 | } |
610 | #endif | 643 | #endif |
611 | 644 | ||
612 | struct seq_file; | 645 | struct seq_file; |
613 | int show_interrupts(struct seq_file *p, void *v); | 646 | int show_interrupts(struct seq_file *p, void *v); |
614 | int arch_show_interrupts(struct seq_file *p, int prec); | 647 | int arch_show_interrupts(struct seq_file *p, int prec); |
615 | 648 | ||
616 | extern int early_irq_init(void); | 649 | extern int early_irq_init(void); |
617 | extern int arch_probe_nr_irqs(void); | 650 | extern int arch_probe_nr_irqs(void); |
618 | extern int arch_early_irq_init(void); | 651 | extern int arch_early_irq_init(void); |
619 | 652 | ||
620 | #endif | 653 | #endif |
621 | 654 |
include/linux/irq.h
1 | #ifndef _LINUX_IRQ_H | 1 | #ifndef _LINUX_IRQ_H |
2 | #define _LINUX_IRQ_H | 2 | #define _LINUX_IRQ_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Please do not include this file in generic code. There is currently | 5 | * Please do not include this file in generic code. There is currently |
6 | * no requirement for any architecture to implement anything held | 6 | * no requirement for any architecture to implement anything held |
7 | * within this file. | 7 | * within this file. |
8 | * | 8 | * |
9 | * Thanks. --rmk | 9 | * Thanks. --rmk |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <linux/cache.h> | 14 | #include <linux/cache.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/cpumask.h> | 16 | #include <linux/cpumask.h> |
17 | #include <linux/gfp.h> | 17 | #include <linux/gfp.h> |
18 | #include <linux/irqreturn.h> | 18 | #include <linux/irqreturn.h> |
19 | #include <linux/irqnr.h> | 19 | #include <linux/irqnr.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/topology.h> | 21 | #include <linux/topology.h> |
22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
23 | 23 | ||
24 | #include <asm/irq.h> | 24 | #include <asm/irq.h> |
25 | #include <asm/ptrace.h> | 25 | #include <asm/ptrace.h> |
26 | #include <asm/irq_regs.h> | 26 | #include <asm/irq_regs.h> |
27 | 27 | ||
28 | struct seq_file; | 28 | struct seq_file; |
29 | struct module; | 29 | struct module; |
30 | struct irq_desc; | 30 | struct irq_desc; |
31 | struct irq_data; | 31 | struct irq_data; |
32 | typedef void (*irq_flow_handler_t)(unsigned int irq, | 32 | typedef void (*irq_flow_handler_t)(unsigned int irq, |
33 | struct irq_desc *desc); | 33 | struct irq_desc *desc); |
34 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); | 34 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * IRQ line status. | 37 | * IRQ line status. |
38 | * | 38 | * |
39 | * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h | 39 | * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h |
40 | * | 40 | * |
41 | * IRQ_TYPE_NONE - default, unspecified type | 41 | * IRQ_TYPE_NONE - default, unspecified type |
42 | * IRQ_TYPE_EDGE_RISING - rising edge triggered | 42 | * IRQ_TYPE_EDGE_RISING - rising edge triggered |
43 | * IRQ_TYPE_EDGE_FALLING - falling edge triggered | 43 | * IRQ_TYPE_EDGE_FALLING - falling edge triggered |
44 | * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered | 44 | * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered |
45 | * IRQ_TYPE_LEVEL_HIGH - high level triggered | 45 | * IRQ_TYPE_LEVEL_HIGH - high level triggered |
46 | * IRQ_TYPE_LEVEL_LOW - low level triggered | 46 | * IRQ_TYPE_LEVEL_LOW - low level triggered |
47 | * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits | 47 | * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits |
48 | * IRQ_TYPE_SENSE_MASK - Mask for all the above bits | 48 | * IRQ_TYPE_SENSE_MASK - Mask for all the above bits |
49 | * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type | 49 | * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type |
50 | * to setup the HW to a sane default (used | 50 | * to setup the HW to a sane default (used |
51 | * by irqdomain map() callbacks to synchronize | 51 | * by irqdomain map() callbacks to synchronize |
52 | * the HW state and SW flags for a newly | 52 | * the HW state and SW flags for a newly |
53 | * allocated descriptor). | 53 | * allocated descriptor). |
54 | * | 54 | * |
55 | * IRQ_TYPE_PROBE - Special flag for probing in progress | 55 | * IRQ_TYPE_PROBE - Special flag for probing in progress |
56 | * | 56 | * |
57 | * Bits which can be modified via irq_set/clear/modify_status_flags() | 57 | * Bits which can be modified via irq_set/clear/modify_status_flags() |
58 | * IRQ_LEVEL - Interrupt is level type. Will also be | 58 | * IRQ_LEVEL - Interrupt is level type. Will also be |
59 | * updated in the code when the above trigger | 59 | * updated in the code when the above trigger |
60 | * bits are modified via irq_set_irq_type() | 60 | * bits are modified via irq_set_irq_type() |
61 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect | 61 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect |
62 | * it from affinity setting | 62 | * it from affinity setting |
63 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing | 63 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing |
64 | * IRQ_NOREQUEST - Interrupt cannot be requested via | 64 | * IRQ_NOREQUEST - Interrupt cannot be requested via |
65 | * request_irq() | 65 | * request_irq() |
66 | * IRQ_NOTHREAD - Interrupt cannot be threaded | 66 | * IRQ_NOTHREAD - Interrupt cannot be threaded |
67 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in | 67 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in |
68 | * request/setup_irq() | 68 | * request/setup_irq() |
69 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) | 69 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) |
70 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context | 70 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context |
71 | * IRQ_NESTED_THREAD - Interrupt nests into another thread | 71 | * IRQ_NESTED_THREAD - Interrupt nests into another thread |
72 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable | 72 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable |
73 | * IRQ_IS_POLLED - Always polled by another interrupt. Exclude | 73 | * IRQ_IS_POLLED - Always polled by another interrupt. Exclude |
74 | * it from the spurious interrupt detection | 74 | * it from the spurious interrupt detection |
75 | * mechanism and from core side polling. | 75 | * mechanism and from core side polling. |
76 | */ | 76 | */ |
77 | enum { | 77 | enum { |
78 | IRQ_TYPE_NONE = 0x00000000, | 78 | IRQ_TYPE_NONE = 0x00000000, |
79 | IRQ_TYPE_EDGE_RISING = 0x00000001, | 79 | IRQ_TYPE_EDGE_RISING = 0x00000001, |
80 | IRQ_TYPE_EDGE_FALLING = 0x00000002, | 80 | IRQ_TYPE_EDGE_FALLING = 0x00000002, |
81 | IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), | 81 | IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), |
82 | IRQ_TYPE_LEVEL_HIGH = 0x00000004, | 82 | IRQ_TYPE_LEVEL_HIGH = 0x00000004, |
83 | IRQ_TYPE_LEVEL_LOW = 0x00000008, | 83 | IRQ_TYPE_LEVEL_LOW = 0x00000008, |
84 | IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), | 84 | IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), |
85 | IRQ_TYPE_SENSE_MASK = 0x0000000f, | 85 | IRQ_TYPE_SENSE_MASK = 0x0000000f, |
86 | IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK, | 86 | IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK, |
87 | 87 | ||
88 | IRQ_TYPE_PROBE = 0x00000010, | 88 | IRQ_TYPE_PROBE = 0x00000010, |
89 | 89 | ||
90 | IRQ_LEVEL = (1 << 8), | 90 | IRQ_LEVEL = (1 << 8), |
91 | IRQ_PER_CPU = (1 << 9), | 91 | IRQ_PER_CPU = (1 << 9), |
92 | IRQ_NOPROBE = (1 << 10), | 92 | IRQ_NOPROBE = (1 << 10), |
93 | IRQ_NOREQUEST = (1 << 11), | 93 | IRQ_NOREQUEST = (1 << 11), |
94 | IRQ_NOAUTOEN = (1 << 12), | 94 | IRQ_NOAUTOEN = (1 << 12), |
95 | IRQ_NO_BALANCING = (1 << 13), | 95 | IRQ_NO_BALANCING = (1 << 13), |
96 | IRQ_MOVE_PCNTXT = (1 << 14), | 96 | IRQ_MOVE_PCNTXT = (1 << 14), |
97 | IRQ_NESTED_THREAD = (1 << 15), | 97 | IRQ_NESTED_THREAD = (1 << 15), |
98 | IRQ_NOTHREAD = (1 << 16), | 98 | IRQ_NOTHREAD = (1 << 16), |
99 | IRQ_PER_CPU_DEVID = (1 << 17), | 99 | IRQ_PER_CPU_DEVID = (1 << 17), |
100 | IRQ_IS_POLLED = (1 << 18), | 100 | IRQ_IS_POLLED = (1 << 18), |
101 | }; | 101 | }; |
102 | 102 | ||
103 | #define IRQF_MODIFY_MASK \ | 103 | #define IRQF_MODIFY_MASK \ |
104 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ | 104 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ |
105 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ | 105 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ |
106 | IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ | 106 | IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ |
107 | IRQ_IS_POLLED) | 107 | IRQ_IS_POLLED) |
108 | 108 | ||
109 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) | 109 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Return value for chip->irq_set_affinity() | 112 | * Return value for chip->irq_set_affinity() |
113 | * | 113 | * |
114 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity | 114 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity |
115 | * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity | 115 | * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity |
116 | */ | 116 | */ |
117 | enum { | 117 | enum { |
118 | IRQ_SET_MASK_OK = 0, | 118 | IRQ_SET_MASK_OK = 0, |
119 | IRQ_SET_MASK_OK_NOCOPY, | 119 | IRQ_SET_MASK_OK_NOCOPY, |
120 | }; | 120 | }; |
121 | 121 | ||
122 | struct msi_desc; | 122 | struct msi_desc; |
123 | struct irq_domain; | 123 | struct irq_domain; |
124 | 124 | ||
125 | /** | 125 | /** |
126 | * struct irq_data - per irq and irq chip data passed down to chip functions | 126 | * struct irq_data - per irq and irq chip data passed down to chip functions |
127 | * @mask: precomputed bitmask for accessing the chip registers | 127 | * @mask: precomputed bitmask for accessing the chip registers |
128 | * @irq: interrupt number | 128 | * @irq: interrupt number |
129 | * @hwirq: hardware interrupt number, local to the interrupt domain | 129 | * @hwirq: hardware interrupt number, local to the interrupt domain |
130 | * @node: node index useful for balancing | 130 | * @node: node index useful for balancing |
131 | * @state_use_accessors: status information for irq chip functions. | 131 | * @state_use_accessors: status information for irq chip functions. |
132 | * Use accessor functions to deal with it | 132 | * Use accessor functions to deal with it |
133 | * @chip: low level interrupt hardware access | 133 | * @chip: low level interrupt hardware access |
134 | * @domain: Interrupt translation domain; responsible for mapping | 134 | * @domain: Interrupt translation domain; responsible for mapping |
135 | * between hwirq number and linux irq number. | 135 | * between hwirq number and linux irq number. |
136 | * @handler_data: per-IRQ data for the irq_chip methods | 136 | * @handler_data: per-IRQ data for the irq_chip methods |
137 | * @chip_data: platform-specific per-chip private data for the chip | 137 | * @chip_data: platform-specific per-chip private data for the chip |
138 | * methods, to allow shared chip implementations | 138 | * methods, to allow shared chip implementations |
139 | * @msi_desc: MSI descriptor | 139 | * @msi_desc: MSI descriptor |
140 | * @affinity: IRQ affinity on SMP | 140 | * @affinity: IRQ affinity on SMP |
141 | * | 141 | * |
142 | * The fields here need to overlay the ones in irq_desc until we | 142 | * The fields here need to overlay the ones in irq_desc until we |
143 | * have cleaned up the direct references and switched everything over to | 143 | * have cleaned up the direct references and switched everything over to |
144 | * irq_data. | 144 | * irq_data. |
145 | */ | 145 | */ |
146 | struct irq_data { | 146 | struct irq_data { |
147 | u32 mask; | 147 | u32 mask; |
148 | unsigned int irq; | 148 | unsigned int irq; |
149 | unsigned long hwirq; | 149 | unsigned long hwirq; |
150 | unsigned int node; | 150 | unsigned int node; |
151 | unsigned int state_use_accessors; | 151 | unsigned int state_use_accessors; |
152 | struct irq_chip *chip; | 152 | struct irq_chip *chip; |
153 | struct irq_domain *domain; | 153 | struct irq_domain *domain; |
154 | void *handler_data; | 154 | void *handler_data; |
155 | void *chip_data; | 155 | void *chip_data; |
156 | struct msi_desc *msi_desc; | 156 | struct msi_desc *msi_desc; |
157 | cpumask_var_t affinity; | 157 | cpumask_var_t affinity; |
158 | }; | 158 | }; |
159 | 159 | ||
160 | /* | 160 | /* |
161 | * Bit masks for irq_data.state | 161 | * Bit masks for irq_data.state |
162 | * | 162 | * |
163 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits | 163 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits |
164 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending | 164 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending |
165 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ | 165 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ |
166 | * IRQD_PER_CPU - Interrupt is per cpu | 166 | * IRQD_PER_CPU - Interrupt is per cpu |
167 | * IRQD_AFFINITY_SET - Interrupt affinity was set | 167 | * IRQD_AFFINITY_SET - Interrupt affinity was set |
168 | * IRQD_LEVEL - Interrupt is level triggered | 168 | * IRQD_LEVEL - Interrupt is level triggered |
169 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup | 169 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup |
170 | * from suspend | 170 | * from suspend |
171 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process | 171 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process |
172 | * context | 172 | * context |
173 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt | 173 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt |
174 | * IRQD_IRQ_MASKED - Masked state of the interrupt | 174 | * IRQD_IRQ_MASKED - Masked state of the interrupt |
175 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt | 175 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt |
176 | */ | 176 | */ |
177 | enum { | 177 | enum { |
178 | IRQD_TRIGGER_MASK = 0xf, | 178 | IRQD_TRIGGER_MASK = 0xf, |
179 | IRQD_SETAFFINITY_PENDING = (1 << 8), | 179 | IRQD_SETAFFINITY_PENDING = (1 << 8), |
180 | IRQD_NO_BALANCING = (1 << 10), | 180 | IRQD_NO_BALANCING = (1 << 10), |
181 | IRQD_PER_CPU = (1 << 11), | 181 | IRQD_PER_CPU = (1 << 11), |
182 | IRQD_AFFINITY_SET = (1 << 12), | 182 | IRQD_AFFINITY_SET = (1 << 12), |
183 | IRQD_LEVEL = (1 << 13), | 183 | IRQD_LEVEL = (1 << 13), |
184 | IRQD_WAKEUP_STATE = (1 << 14), | 184 | IRQD_WAKEUP_STATE = (1 << 14), |
185 | IRQD_MOVE_PCNTXT = (1 << 15), | 185 | IRQD_MOVE_PCNTXT = (1 << 15), |
186 | IRQD_IRQ_DISABLED = (1 << 16), | 186 | IRQD_IRQ_DISABLED = (1 << 16), |
187 | IRQD_IRQ_MASKED = (1 << 17), | 187 | IRQD_IRQ_MASKED = (1 << 17), |
188 | IRQD_IRQ_INPROGRESS = (1 << 18), | 188 | IRQD_IRQ_INPROGRESS = (1 << 18), |
189 | }; | 189 | }; |
190 | 190 | ||
191 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) | 191 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) |
192 | { | 192 | { |
193 | return d->state_use_accessors & IRQD_SETAFFINITY_PENDING; | 193 | return d->state_use_accessors & IRQD_SETAFFINITY_PENDING; |
194 | } | 194 | } |
195 | 195 | ||
196 | static inline bool irqd_is_per_cpu(struct irq_data *d) | 196 | static inline bool irqd_is_per_cpu(struct irq_data *d) |
197 | { | 197 | { |
198 | return d->state_use_accessors & IRQD_PER_CPU; | 198 | return d->state_use_accessors & IRQD_PER_CPU; |
199 | } | 199 | } |
200 | 200 | ||
201 | static inline bool irqd_can_balance(struct irq_data *d) | 201 | static inline bool irqd_can_balance(struct irq_data *d) |
202 | { | 202 | { |
203 | return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING)); | 203 | return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING)); |
204 | } | 204 | } |
205 | 205 | ||
206 | static inline bool irqd_affinity_was_set(struct irq_data *d) | 206 | static inline bool irqd_affinity_was_set(struct irq_data *d) |
207 | { | 207 | { |
208 | return d->state_use_accessors & IRQD_AFFINITY_SET; | 208 | return d->state_use_accessors & IRQD_AFFINITY_SET; |
209 | } | 209 | } |
210 | 210 | ||
211 | static inline void irqd_mark_affinity_was_set(struct irq_data *d) | 211 | static inline void irqd_mark_affinity_was_set(struct irq_data *d) |
212 | { | 212 | { |
213 | d->state_use_accessors |= IRQD_AFFINITY_SET; | 213 | d->state_use_accessors |= IRQD_AFFINITY_SET; |
214 | } | 214 | } |
215 | 215 | ||
216 | static inline u32 irqd_get_trigger_type(struct irq_data *d) | 216 | static inline u32 irqd_get_trigger_type(struct irq_data *d) |
217 | { | 217 | { |
218 | return d->state_use_accessors & IRQD_TRIGGER_MASK; | 218 | return d->state_use_accessors & IRQD_TRIGGER_MASK; |
219 | } | 219 | } |
220 | 220 | ||
221 | /* | 221 | /* |
222 | * Must only be called inside irq_chip.irq_set_type() functions. | 222 | * Must only be called inside irq_chip.irq_set_type() functions. |
223 | */ | 223 | */ |
224 | static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) | 224 | static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) |
225 | { | 225 | { |
226 | d->state_use_accessors &= ~IRQD_TRIGGER_MASK; | 226 | d->state_use_accessors &= ~IRQD_TRIGGER_MASK; |
227 | d->state_use_accessors |= type & IRQD_TRIGGER_MASK; | 227 | d->state_use_accessors |= type & IRQD_TRIGGER_MASK; |
228 | } | 228 | } |
229 | 229 | ||
230 | static inline bool irqd_is_level_type(struct irq_data *d) | 230 | static inline bool irqd_is_level_type(struct irq_data *d) |
231 | { | 231 | { |
232 | return d->state_use_accessors & IRQD_LEVEL; | 232 | return d->state_use_accessors & IRQD_LEVEL; |
233 | } | 233 | } |
234 | 234 | ||
235 | static inline bool irqd_is_wakeup_set(struct irq_data *d) | 235 | static inline bool irqd_is_wakeup_set(struct irq_data *d) |
236 | { | 236 | { |
237 | return d->state_use_accessors & IRQD_WAKEUP_STATE; | 237 | return d->state_use_accessors & IRQD_WAKEUP_STATE; |
238 | } | 238 | } |
239 | 239 | ||
240 | static inline bool irqd_can_move_in_process_context(struct irq_data *d) | 240 | static inline bool irqd_can_move_in_process_context(struct irq_data *d) |
241 | { | 241 | { |
242 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; | 242 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; |
243 | } | 243 | } |
244 | 244 | ||
245 | static inline bool irqd_irq_disabled(struct irq_data *d) | 245 | static inline bool irqd_irq_disabled(struct irq_data *d) |
246 | { | 246 | { |
247 | return d->state_use_accessors & IRQD_IRQ_DISABLED; | 247 | return d->state_use_accessors & IRQD_IRQ_DISABLED; |
248 | } | 248 | } |
249 | 249 | ||
250 | static inline bool irqd_irq_masked(struct irq_data *d) | 250 | static inline bool irqd_irq_masked(struct irq_data *d) |
251 | { | 251 | { |
252 | return d->state_use_accessors & IRQD_IRQ_MASKED; | 252 | return d->state_use_accessors & IRQD_IRQ_MASKED; |
253 | } | 253 | } |
254 | 254 | ||
255 | static inline bool irqd_irq_inprogress(struct irq_data *d) | 255 | static inline bool irqd_irq_inprogress(struct irq_data *d) |
256 | { | 256 | { |
257 | return d->state_use_accessors & IRQD_IRQ_INPROGRESS; | 257 | return d->state_use_accessors & IRQD_IRQ_INPROGRESS; |
258 | } | 258 | } |
259 | 259 | ||
260 | /* | 260 | /* |
261 | * Functions for chained handlers which can be enabled/disabled by the | 261 | * Functions for chained handlers which can be enabled/disabled by the |
262 | * standard disable_irq/enable_irq calls. Must be called with | 262 | * standard disable_irq/enable_irq calls. Must be called with |
263 | * irq_desc->lock held. | 263 | * irq_desc->lock held. |
264 | */ | 264 | */ |
265 | static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) | 265 | static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) |
266 | { | 266 | { |
267 | d->state_use_accessors |= IRQD_IRQ_INPROGRESS; | 267 | d->state_use_accessors |= IRQD_IRQ_INPROGRESS; |
268 | } | 268 | } |
269 | 269 | ||
270 | static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | 270 | static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) |
271 | { | 271 | { |
272 | d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS; | 272 | d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS; |
273 | } | 273 | } |
274 | 274 | ||
275 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | 275 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) |
276 | { | 276 | { |
277 | return d->hwirq; | 277 | return d->hwirq; |
278 | } | 278 | } |
279 | 279 | ||
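The irqd_* accessors above are the only sanctioned way for chip code to read irq_data state. As a purely illustrative sketch (the my_* names and the register offset are invented, not part of this header), an irq_eoi callback might consult them like this, using the irq_reg_writel() helper defined further down:

#include <linux/bitops.h>
#include <linux/irq.h>

static void __iomem *my_reg_base;	/* assumed to be mapped elsewhere */
#define MY_EOI_REG	0x10		/* hypothetical register offset */

static void my_chip_eoi(struct irq_data *d)
{
	/* Defer the EOI while a level-triggered line is still masked. */
	if (irqd_is_level_type(d) && irqd_irq_masked(d))
		return;
	irq_reg_writel(BIT(irqd_to_hwirq(d)), my_reg_base + MY_EOI_REG);
}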
280 | /** | 280 | /** |
281 | * struct irq_chip - hardware interrupt chip descriptor | 281 | * struct irq_chip - hardware interrupt chip descriptor |
282 | * | 282 | * |
283 | * @name: name for /proc/interrupts | 283 | * @name: name for /proc/interrupts |
284 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) | 284 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) |
285 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) | 285 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) |
286 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) | 286 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) |
287 | * @irq_disable: disable the interrupt | 287 | * @irq_disable: disable the interrupt |
288 | * @irq_ack: start of a new interrupt | 288 | * @irq_ack: start of a new interrupt |
289 | * @irq_mask: mask an interrupt source | 289 | * @irq_mask: mask an interrupt source |
290 | * @irq_mask_ack: ack and mask an interrupt source | 290 | * @irq_mask_ack: ack and mask an interrupt source |
291 | * @irq_unmask: unmask an interrupt source | 291 | * @irq_unmask: unmask an interrupt source |
292 | * @irq_eoi: end of interrupt | 292 | * @irq_eoi: end of interrupt |
293 | * @irq_set_affinity: set the CPU affinity on SMP machines | 293 | * @irq_set_affinity: set the CPU affinity on SMP machines |
294 | * @irq_retrigger: resend an IRQ to the CPU | 294 | * @irq_retrigger: resend an IRQ to the CPU |
295 | * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ | 295 | * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ |
296 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ | 296 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ |
297 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips | 297 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips |
298 | * @irq_bus_sync_unlock: function to sync and unlock slow bus (i2c) chips | 298 | * @irq_bus_sync_unlock: function to sync and unlock slow bus (i2c) chips |
299 | * @irq_cpu_online: configure an interrupt source for a secondary CPU | 299 | * @irq_cpu_online: configure an interrupt source for a secondary CPU |
300 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU | 300 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU |
301 | * @irq_suspend: function called from core code on suspend once per chip | 301 | * @irq_suspend: function called from core code on suspend once per chip |
302 | * @irq_resume: function called from core code on resume once per chip | 302 | * @irq_resume: function called from core code on resume once per chip |
303 | * @irq_pm_shutdown: function called from core code on shutdown once per chip | 303 | * @irq_pm_shutdown: function called from core code on shutdown once per chip |
304 | * @irq_calc_mask: Optional function to set irq_data.mask for special cases | 304 | * @irq_calc_mask: Optional function to set irq_data.mask for special cases |
305 | * @irq_print_chip: optional to print special chip info in show_interrupts | 305 | * @irq_print_chip: optional to print special chip info in show_interrupts |
306 | * @irq_request_resources: optional to request resources before calling | 306 | * @irq_request_resources: optional to request resources before calling |
307 | * any other callback related to this irq | 307 | * any other callback related to this irq |
308 | * @irq_release_resources: optional to release resources acquired with | 308 | * @irq_release_resources: optional to release resources acquired with |
309 | * irq_request_resources | 309 | * irq_request_resources |
310 | * @flags: chip specific flags | 310 | * @flags: chip specific flags |
311 | */ | 311 | */ |
312 | struct irq_chip { | 312 | struct irq_chip { |
313 | const char *name; | 313 | const char *name; |
314 | unsigned int (*irq_startup)(struct irq_data *data); | 314 | unsigned int (*irq_startup)(struct irq_data *data); |
315 | void (*irq_shutdown)(struct irq_data *data); | 315 | void (*irq_shutdown)(struct irq_data *data); |
316 | void (*irq_enable)(struct irq_data *data); | 316 | void (*irq_enable)(struct irq_data *data); |
317 | void (*irq_disable)(struct irq_data *data); | 317 | void (*irq_disable)(struct irq_data *data); |
318 | 318 | ||
319 | void (*irq_ack)(struct irq_data *data); | 319 | void (*irq_ack)(struct irq_data *data); |
320 | void (*irq_mask)(struct irq_data *data); | 320 | void (*irq_mask)(struct irq_data *data); |
321 | void (*irq_mask_ack)(struct irq_data *data); | 321 | void (*irq_mask_ack)(struct irq_data *data); |
322 | void (*irq_unmask)(struct irq_data *data); | 322 | void (*irq_unmask)(struct irq_data *data); |
323 | void (*irq_eoi)(struct irq_data *data); | 323 | void (*irq_eoi)(struct irq_data *data); |
324 | 324 | ||
325 | int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); | 325 | int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); |
326 | int (*irq_retrigger)(struct irq_data *data); | 326 | int (*irq_retrigger)(struct irq_data *data); |
327 | int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); | 327 | int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); |
328 | int (*irq_set_wake)(struct irq_data *data, unsigned int on); | 328 | int (*irq_set_wake)(struct irq_data *data, unsigned int on); |
329 | 329 | ||
330 | void (*irq_bus_lock)(struct irq_data *data); | 330 | void (*irq_bus_lock)(struct irq_data *data); |
331 | void (*irq_bus_sync_unlock)(struct irq_data *data); | 331 | void (*irq_bus_sync_unlock)(struct irq_data *data); |
332 | 332 | ||
333 | void (*irq_cpu_online)(struct irq_data *data); | 333 | void (*irq_cpu_online)(struct irq_data *data); |
334 | void (*irq_cpu_offline)(struct irq_data *data); | 334 | void (*irq_cpu_offline)(struct irq_data *data); |
335 | 335 | ||
336 | void (*irq_suspend)(struct irq_data *data); | 336 | void (*irq_suspend)(struct irq_data *data); |
337 | void (*irq_resume)(struct irq_data *data); | 337 | void (*irq_resume)(struct irq_data *data); |
338 | void (*irq_pm_shutdown)(struct irq_data *data); | 338 | void (*irq_pm_shutdown)(struct irq_data *data); |
339 | 339 | ||
340 | void (*irq_calc_mask)(struct irq_data *data); | 340 | void (*irq_calc_mask)(struct irq_data *data); |
341 | 341 | ||
342 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); | 342 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); |
343 | int (*irq_request_resources)(struct irq_data *data); | 343 | int (*irq_request_resources)(struct irq_data *data); |
344 | void (*irq_release_resources)(struct irq_data *data); | 344 | void (*irq_release_resources)(struct irq_data *data); |
345 | 345 | ||
346 | unsigned long flags; | 346 | unsigned long flags; |
347 | }; | 347 | }; |
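For orientation, here is a minimal sketch of filling in this descriptor. Only .name, the mask pair, and .irq_set_affinity are populated; the my_* names are invented and the hardware routing is elided. Note how the force argument widens the allowed target set, which is the point of the genirq change in this series:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/irq.h>

static void my_mask(struct irq_data *d)		{ /* mask the hw line */ }
static void my_unmask(struct irq_data *d)	{ /* unmask the hw line */ }

static int my_set_affinity(struct irq_data *d, const struct cpumask *dest,
			   bool force)
{
	/* Unforced requests are restricted to online CPUs; forced ones
	 * (cpu bringup) may target a CPU that is not online yet. */
	unsigned int cpu = force ? cpumask_first(dest)
				 : cpumask_first_and(dest, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;
	/* ... program the hw route to cpu here ... */
	return IRQ_SET_MASK_OK;
}

static struct irq_chip my_chip = {
	.name			= "MYCHIP",
	.irq_mask		= my_mask,
	.irq_unmask		= my_unmask,
	.irq_set_affinity	= my_set_affinity,
};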
348 | 348 | ||
349 | /* | 349 | /* |
350 | * irq_chip specific flags | 350 | * irq_chip specific flags |
351 | * | 351 | * |
352 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() | 352 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() |
353 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled | 353 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled |
354 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path | 354 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path |
355 | * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks | 355 | * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks |
356 | * when irq enabled | 356 | * when irq enabled |
357 | * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip | 357 | * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip |
358 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask | 358 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask |
359 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode | 359 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode |
360 | */ | 360 | */ |
361 | enum { | 361 | enum { |
362 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), | 362 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), |
363 | IRQCHIP_EOI_IF_HANDLED = (1 << 1), | 363 | IRQCHIP_EOI_IF_HANDLED = (1 << 1), |
364 | IRQCHIP_MASK_ON_SUSPEND = (1 << 2), | 364 | IRQCHIP_MASK_ON_SUSPEND = (1 << 2), |
365 | IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), | 365 | IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), |
366 | IRQCHIP_SKIP_SET_WAKE = (1 << 4), | 366 | IRQCHIP_SKIP_SET_WAKE = (1 << 4), |
367 | IRQCHIP_ONESHOT_SAFE = (1 << 5), | 367 | IRQCHIP_ONESHOT_SAFE = (1 << 5), |
368 | IRQCHIP_EOI_THREADED = (1 << 6), | 368 | IRQCHIP_EOI_THREADED = (1 << 6), |
369 | }; | 369 | }; |
370 | 370 | ||
371 | /* This include will go away once we have isolated irq_desc usage to core code */ | 372 | /* This include will go away once we have isolated irq_desc usage to core code */ |
372 | #include <linux/irqdesc.h> | 372 | #include <linux/irqdesc.h> |
373 | 373 | ||
374 | /* | 374 | /* |
375 | * Pick up the arch-dependent methods: | 375 | * Pick up the arch-dependent methods: |
376 | */ | 376 | */ |
377 | #include <asm/hw_irq.h> | 377 | #include <asm/hw_irq.h> |
378 | 378 | ||
379 | #ifndef NR_IRQS_LEGACY | 379 | #ifndef NR_IRQS_LEGACY |
380 | # define NR_IRQS_LEGACY 0 | 380 | # define NR_IRQS_LEGACY 0 |
381 | #endif | 381 | #endif |
382 | 382 | ||
383 | #ifndef ARCH_IRQ_INIT_FLAGS | 383 | #ifndef ARCH_IRQ_INIT_FLAGS |
384 | # define ARCH_IRQ_INIT_FLAGS 0 | 384 | # define ARCH_IRQ_INIT_FLAGS 0 |
385 | #endif | 385 | #endif |
386 | 386 | ||
387 | #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS | 387 | #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS |
388 | 388 | ||
389 | struct irqaction; | 389 | struct irqaction; |
390 | extern int setup_irq(unsigned int irq, struct irqaction *new); | 390 | extern int setup_irq(unsigned int irq, struct irqaction *new); |
391 | extern void remove_irq(unsigned int irq, struct irqaction *act); | 391 | extern void remove_irq(unsigned int irq, struct irqaction *act); |
392 | extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); | 392 | extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); |
393 | extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); | 393 | extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); |
394 | 394 | ||
395 | extern void irq_cpu_online(void); | 395 | extern void irq_cpu_online(void); |
396 | extern void irq_cpu_offline(void); | 396 | extern void irq_cpu_offline(void); |
397 | extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask); | 397 | extern int irq_set_affinity_locked(struct irq_data *data, |
398 | const struct cpumask *cpumask, bool force); | ||
398 | 399 | ||
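The rename above (from __irq_set_affinity_locked) and the new force argument are what let a caller target a CPU that is not online yet. A hedged sketch of the intended bringup usage, via the irq_force_affinity() wrapper this series adds in linux/interrupt.h (the my_* names are illustrative):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void my_local_timer_setup(unsigned int cpu, unsigned int evt_irq)
{
	/*
	 * A plain irq_set_affinity() would fail here: the calling CPU
	 * is still being brought up and is not in cpu_online_mask.
	 */
	irq_force_affinity(evt_irq, cpumask_of(cpu));
}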
399 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) | 400 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
400 | void irq_move_irq(struct irq_data *data); | 401 | void irq_move_irq(struct irq_data *data); |
401 | void irq_move_masked_irq(struct irq_data *data); | 402 | void irq_move_masked_irq(struct irq_data *data); |
402 | #else | 403 | #else |
403 | static inline void irq_move_irq(struct irq_data *data) { } | 404 | static inline void irq_move_irq(struct irq_data *data) { } |
404 | static inline void irq_move_masked_irq(struct irq_data *data) { } | 405 | static inline void irq_move_masked_irq(struct irq_data *data) { } |
405 | #endif | 406 | #endif |
406 | 407 | ||
407 | extern int no_irq_affinity; | 408 | extern int no_irq_affinity; |
408 | 409 | ||
409 | #ifdef CONFIG_HARDIRQS_SW_RESEND | 410 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
410 | int irq_set_parent(int irq, int parent_irq); | 411 | int irq_set_parent(int irq, int parent_irq); |
411 | #else | 412 | #else |
412 | static inline int irq_set_parent(int irq, int parent_irq) | 413 | static inline int irq_set_parent(int irq, int parent_irq) |
413 | { | 414 | { |
414 | return 0; | 415 | return 0; |
415 | } | 416 | } |
416 | #endif | 417 | #endif |
417 | 418 | ||
418 | /* | 419 | /* |
419 | * Built-in IRQ handlers for various IRQ types, | 420 | * Built-in IRQ handlers for various IRQ types, |
420 | * callable via desc->handle_irq() | 421 | * callable via desc->handle_irq() |
421 | */ | 422 | */ |
422 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); | 423 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); |
423 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); | 424 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); |
424 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); | 425 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); |
425 | extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); | 426 | extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); |
426 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); | 427 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); |
427 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); | 428 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); |
428 | extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); | 429 | extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); |
429 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | 430 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); |
430 | extern void handle_nested_irq(unsigned int irq); | 431 | extern void handle_nested_irq(unsigned int irq); |
431 | 432 | ||
432 | /* Handling of unhandled and spurious interrupts: */ | 433 | /* Handling of unhandled and spurious interrupts: */ |
433 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, | 434 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, |
434 | irqreturn_t action_ret); | 435 | irqreturn_t action_ret); |
435 | 436 | ||
436 | 437 | ||
437 | /* Enable/disable irq debugging output: */ | 438 | /* Enable/disable irq debugging output: */ |
438 | extern int noirqdebug_setup(char *str); | 439 | extern int noirqdebug_setup(char *str); |
439 | 440 | ||
440 | /* Checks whether the interrupt can be requested by request_irq(): */ | 441 | /* Checks whether the interrupt can be requested by request_irq(): */ |
441 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); | 442 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); |
442 | 443 | ||
443 | /* Dummy irq-chip implementations: */ | 444 | /* Dummy irq-chip implementations: */ |
444 | extern struct irq_chip no_irq_chip; | 445 | extern struct irq_chip no_irq_chip; |
445 | extern struct irq_chip dummy_irq_chip; | 446 | extern struct irq_chip dummy_irq_chip; |
446 | 447 | ||
447 | extern void | 448 | extern void |
448 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | 449 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
449 | irq_flow_handler_t handle, const char *name); | 450 | irq_flow_handler_t handle, const char *name); |
450 | 451 | ||
451 | static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, | 452 | static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, |
452 | irq_flow_handler_t handle) | 453 | irq_flow_handler_t handle) |
453 | { | 454 | { |
454 | irq_set_chip_and_handler_name(irq, chip, handle, NULL); | 455 | irq_set_chip_and_handler_name(irq, chip, handle, NULL); |
455 | } | 456 | } |
456 | 457 | ||
457 | extern int irq_set_percpu_devid(unsigned int irq); | 458 | extern int irq_set_percpu_devid(unsigned int irq); |
458 | 459 | ||
459 | extern void | 460 | extern void |
460 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 461 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
461 | const char *name); | 462 | const char *name); |
462 | 463 | ||
463 | static inline void | 464 | static inline void |
464 | irq_set_handler(unsigned int irq, irq_flow_handler_t handle) | 465 | irq_set_handler(unsigned int irq, irq_flow_handler_t handle) |
465 | { | 466 | { |
466 | __irq_set_handler(irq, handle, 0, NULL); | 467 | __irq_set_handler(irq, handle, 0, NULL); |
467 | } | 468 | } |
468 | 469 | ||
469 | /* | 470 | /* |
470 | * Set a highlevel chained flow handler for a given IRQ. | 471 | * Set a highlevel chained flow handler for a given IRQ. |
471 | * (a chained handler is automatically enabled and set to | 472 | * (a chained handler is automatically enabled and set to |
472 | * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) | 473 | * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) |
473 | */ | 474 | */ |
474 | static inline void | 475 | static inline void |
475 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) | 476 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) |
476 | { | 477 | { |
477 | __irq_set_handler(irq, handle, 1, NULL); | 478 | __irq_set_handler(irq, handle, 1, NULL); |
478 | } | 479 | } |
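A typical consumer of irq_set_chained_handler() is a cascaded-controller demux. The sketch below uses the chained_irq_enter()/chained_irq_exit() helpers from linux/irqchip/chained_irq.h; the gpio_* names are placeholders, not an existing driver:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

static void gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);	/* ack/mask the parent as needed */
	/* ... read the bank status, generic_handle_irq() each set bit ... */
	chained_irq_exit(chip, desc);	/* eoi/unmask the parent */
}

static void gpio_bank_init(unsigned int parent_irq, void *bank)
{
	irq_set_handler_data(parent_irq, bank);
	irq_set_chained_handler(parent_irq, gpio_demux_handler);
}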
479 | 480 | ||
480 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); | 481 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); |
481 | 482 | ||
482 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) | 483 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) |
483 | { | 484 | { |
484 | irq_modify_status(irq, 0, set); | 485 | irq_modify_status(irq, 0, set); |
485 | } | 486 | } |
486 | 487 | ||
487 | static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) | 488 | static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) |
488 | { | 489 | { |
489 | irq_modify_status(irq, clr, 0); | 490 | irq_modify_status(irq, clr, 0); |
490 | } | 491 | } |
491 | 492 | ||
492 | static inline void irq_set_noprobe(unsigned int irq) | 493 | static inline void irq_set_noprobe(unsigned int irq) |
493 | { | 494 | { |
494 | irq_modify_status(irq, 0, IRQ_NOPROBE); | 495 | irq_modify_status(irq, 0, IRQ_NOPROBE); |
495 | } | 496 | } |
496 | 497 | ||
497 | static inline void irq_set_probe(unsigned int irq) | 498 | static inline void irq_set_probe(unsigned int irq) |
498 | { | 499 | { |
499 | irq_modify_status(irq, IRQ_NOPROBE, 0); | 500 | irq_modify_status(irq, IRQ_NOPROBE, 0); |
500 | } | 501 | } |
501 | 502 | ||
502 | static inline void irq_set_nothread(unsigned int irq) | 503 | static inline void irq_set_nothread(unsigned int irq) |
503 | { | 504 | { |
504 | irq_modify_status(irq, 0, IRQ_NOTHREAD); | 505 | irq_modify_status(irq, 0, IRQ_NOTHREAD); |
505 | } | 506 | } |
506 | 507 | ||
507 | static inline void irq_set_thread(unsigned int irq) | 508 | static inline void irq_set_thread(unsigned int irq) |
508 | { | 509 | { |
509 | irq_modify_status(irq, IRQ_NOTHREAD, 0); | 510 | irq_modify_status(irq, IRQ_NOTHREAD, 0); |
510 | } | 511 | } |
511 | 512 | ||
512 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) | 513 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) |
513 | { | 514 | { |
514 | if (nest) | 515 | if (nest) |
515 | irq_set_status_flags(irq, IRQ_NESTED_THREAD); | 516 | irq_set_status_flags(irq, IRQ_NESTED_THREAD); |
516 | else | 517 | else |
517 | irq_clear_status_flags(irq, IRQ_NESTED_THREAD); | 518 | irq_clear_status_flags(irq, IRQ_NESTED_THREAD); |
518 | } | 519 | } |
519 | 520 | ||
520 | static inline void irq_set_percpu_devid_flags(unsigned int irq) | 521 | static inline void irq_set_percpu_devid_flags(unsigned int irq) |
521 | { | 522 | { |
522 | irq_set_status_flags(irq, | 523 | irq_set_status_flags(irq, |
523 | IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD | | 524 | IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD | |
524 | IRQ_NOPROBE | IRQ_PER_CPU_DEVID); | 525 | IRQ_NOPROBE | IRQ_PER_CPU_DEVID); |
525 | } | 526 | } |
526 | 527 | ||
527 | /* Handle dynamic irq creation and destruction */ | 528 | /* Handle dynamic irq creation and destruction */ |
528 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); | 529 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); |
529 | extern unsigned int __create_irqs(unsigned int from, unsigned int count, | 530 | extern unsigned int __create_irqs(unsigned int from, unsigned int count, |
530 | int node); | 531 | int node); |
531 | extern int create_irq(void); | 532 | extern int create_irq(void); |
532 | extern void destroy_irq(unsigned int irq); | 533 | extern void destroy_irq(unsigned int irq); |
533 | extern void destroy_irqs(unsigned int irq, unsigned int count); | 534 | extern void destroy_irqs(unsigned int irq, unsigned int count); |
534 | 535 | ||
535 | /* | 536 | /* |
536 | * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and | 537 | * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and |
537 | * irq_free_desc instead. | 538 | * irq_free_desc instead. |
538 | */ | 539 | */ |
539 | extern void dynamic_irq_cleanup(unsigned int irq); | 540 | extern void dynamic_irq_cleanup(unsigned int irq); |
540 | static inline void dynamic_irq_init(unsigned int irq) | 541 | static inline void dynamic_irq_init(unsigned int irq) |
541 | { | 542 | { |
542 | dynamic_irq_cleanup(irq); | 543 | dynamic_irq_cleanup(irq); |
543 | } | 544 | } |
544 | 545 | ||
545 | /* Set/get chip/data for an IRQ: */ | 546 | /* Set/get chip/data for an IRQ: */ |
546 | extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); | 547 | extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); |
547 | extern int irq_set_handler_data(unsigned int irq, void *data); | 548 | extern int irq_set_handler_data(unsigned int irq, void *data); |
548 | extern int irq_set_chip_data(unsigned int irq, void *data); | 549 | extern int irq_set_chip_data(unsigned int irq, void *data); |
549 | extern int irq_set_irq_type(unsigned int irq, unsigned int type); | 550 | extern int irq_set_irq_type(unsigned int irq, unsigned int type); |
550 | extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); | 551 | extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); |
551 | extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, | 552 | extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, |
552 | struct msi_desc *entry); | 553 | struct msi_desc *entry); |
553 | extern struct irq_data *irq_get_irq_data(unsigned int irq); | 554 | extern struct irq_data *irq_get_irq_data(unsigned int irq); |
554 | 555 | ||
555 | static inline struct irq_chip *irq_get_chip(unsigned int irq) | 556 | static inline struct irq_chip *irq_get_chip(unsigned int irq) |
556 | { | 557 | { |
557 | struct irq_data *d = irq_get_irq_data(irq); | 558 | struct irq_data *d = irq_get_irq_data(irq); |
558 | return d ? d->chip : NULL; | 559 | return d ? d->chip : NULL; |
559 | } | 560 | } |
560 | 561 | ||
561 | static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) | 562 | static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) |
562 | { | 563 | { |
563 | return d->chip; | 564 | return d->chip; |
564 | } | 565 | } |
565 | 566 | ||
566 | static inline void *irq_get_chip_data(unsigned int irq) | 567 | static inline void *irq_get_chip_data(unsigned int irq) |
567 | { | 568 | { |
568 | struct irq_data *d = irq_get_irq_data(irq); | 569 | struct irq_data *d = irq_get_irq_data(irq); |
569 | return d ? d->chip_data : NULL; | 570 | return d ? d->chip_data : NULL; |
570 | } | 571 | } |
571 | 572 | ||
572 | static inline void *irq_data_get_irq_chip_data(struct irq_data *d) | 573 | static inline void *irq_data_get_irq_chip_data(struct irq_data *d) |
573 | { | 574 | { |
574 | return d->chip_data; | 575 | return d->chip_data; |
575 | } | 576 | } |
576 | 577 | ||
577 | static inline void *irq_get_handler_data(unsigned int irq) | 578 | static inline void *irq_get_handler_data(unsigned int irq) |
578 | { | 579 | { |
579 | struct irq_data *d = irq_get_irq_data(irq); | 580 | struct irq_data *d = irq_get_irq_data(irq); |
580 | return d ? d->handler_data : NULL; | 581 | return d ? d->handler_data : NULL; |
581 | } | 582 | } |
582 | 583 | ||
583 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) | 584 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) |
584 | { | 585 | { |
585 | return d->handler_data; | 586 | return d->handler_data; |
586 | } | 587 | } |
587 | 588 | ||
588 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) | 589 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) |
589 | { | 590 | { |
590 | struct irq_data *d = irq_get_irq_data(irq); | 591 | struct irq_data *d = irq_get_irq_data(irq); |
591 | return d ? d->msi_desc : NULL; | 592 | return d ? d->msi_desc : NULL; |
592 | } | 593 | } |
593 | 594 | ||
594 | static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) | 595 | static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) |
595 | { | 596 | { |
596 | return d->msi_desc; | 597 | return d->msi_desc; |
597 | } | 598 | } |
598 | 599 | ||
599 | static inline u32 irq_get_trigger_type(unsigned int irq) | 600 | static inline u32 irq_get_trigger_type(unsigned int irq) |
600 | { | 601 | { |
601 | struct irq_data *d = irq_get_irq_data(irq); | 602 | struct irq_data *d = irq_get_irq_data(irq); |
602 | return d ? irqd_get_trigger_type(d) : 0; | 603 | return d ? irqd_get_trigger_type(d) : 0; |
603 | } | 604 | } |
604 | 605 | ||
605 | int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, | 606 | int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, |
606 | struct module *owner); | 607 | struct module *owner); |
607 | 608 | ||
608 | /* use macros to avoid needing export.h for THIS_MODULE */ | 609 | /* use macros to avoid needing export.h for THIS_MODULE */ |
609 | #define irq_alloc_descs(irq, from, cnt, node) \ | 610 | #define irq_alloc_descs(irq, from, cnt, node) \ |
610 | __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE) | 611 | __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE) |
611 | 612 | ||
612 | #define irq_alloc_desc(node) \ | 613 | #define irq_alloc_desc(node) \ |
613 | irq_alloc_descs(-1, 0, 1, node) | 614 | irq_alloc_descs(-1, 0, 1, node) |
614 | 615 | ||
615 | #define irq_alloc_desc_at(at, node) \ | 616 | #define irq_alloc_desc_at(at, node) \ |
616 | irq_alloc_descs(at, at, 1, node) | 617 | irq_alloc_descs(at, at, 1, node) |
617 | 618 | ||
618 | #define irq_alloc_desc_from(from, node) \ | 619 | #define irq_alloc_desc_from(from, node) \ |
619 | irq_alloc_descs(-1, from, 1, node) | 620 | irq_alloc_descs(-1, from, 1, node) |
620 | 621 | ||
621 | #define irq_alloc_descs_from(from, cnt, node) \ | 622 | #define irq_alloc_descs_from(from, cnt, node) \ |
622 | irq_alloc_descs(-1, from, cnt, node) | 623 | irq_alloc_descs(-1, from, cnt, node) |
623 | 624 | ||
624 | void irq_free_descs(unsigned int irq, unsigned int cnt); | 625 | void irq_free_descs(unsigned int irq, unsigned int cnt); |
625 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | 626 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); |
626 | 627 | ||
627 | static inline void irq_free_desc(unsigned int irq) | 628 | static inline void irq_free_desc(unsigned int irq) |
628 | { | 629 | { |
629 | irq_free_descs(irq, 1); | 630 | irq_free_descs(irq, 1); |
630 | } | 631 | } |
631 | 632 | ||
632 | static inline int irq_reserve_irq(unsigned int irq) | 633 | static inline int irq_reserve_irq(unsigned int irq) |
633 | { | 634 | { |
634 | return irq_reserve_irqs(irq, 1); | 635 | return irq_reserve_irqs(irq, 1); |
635 | } | 636 | } |
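A hedged sketch of the allocator pairing above, reusing dummy_irq_chip and handle_simple_irq from this header; the block size and node are arbitrary:

#include <linux/irq.h>

static int my_setup_irq_block(void)
{
	int i, base = irq_alloc_descs(-1, 0, 8, 0);	/* 8 irqs, node 0 */

	if (base < 0)
		return base;
	for (i = 0; i < 8; i++)
		irq_set_chip_and_handler(base + i, &dummy_irq_chip,
					 handle_simple_irq);
	return base;	/* release later with irq_free_descs(base, 8) */
}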
636 | 637 | ||
637 | #ifndef irq_reg_writel | 638 | #ifndef irq_reg_writel |
638 | # define irq_reg_writel(val, addr) writel(val, addr) | 639 | # define irq_reg_writel(val, addr) writel(val, addr) |
639 | #endif | 640 | #endif |
640 | #ifndef irq_reg_readl | 641 | #ifndef irq_reg_readl |
641 | # define irq_reg_readl(addr) readl(addr) | 642 | # define irq_reg_readl(addr) readl(addr) |
642 | #endif | 643 | #endif |
643 | 644 | ||
644 | /** | 645 | /** |
645 | * struct irq_chip_regs - register offsets for struct irq_chip_generic | 646 | * struct irq_chip_regs - register offsets for struct irq_chip_generic |
646 | * @enable: Enable register offset to reg_base | 647 | * @enable: Enable register offset to reg_base |
647 | * @disable: Disable register offset to reg_base | 648 | * @disable: Disable register offset to reg_base |
648 | * @mask: Mask register offset to reg_base | 649 | * @mask: Mask register offset to reg_base |
649 | * @ack: Ack register offset to reg_base | 650 | * @ack: Ack register offset to reg_base |
650 | * @eoi: Eoi register offset to reg_base | 651 | * @eoi: Eoi register offset to reg_base |
651 | * @type: Type configuration register offset to reg_base | 652 | * @type: Type configuration register offset to reg_base |
652 | * @polarity: Polarity configuration register offset to reg_base | 653 | * @polarity: Polarity configuration register offset to reg_base |
653 | */ | 654 | */ |
654 | struct irq_chip_regs { | 655 | struct irq_chip_regs { |
655 | unsigned long enable; | 656 | unsigned long enable; |
656 | unsigned long disable; | 657 | unsigned long disable; |
657 | unsigned long mask; | 658 | unsigned long mask; |
658 | unsigned long ack; | 659 | unsigned long ack; |
659 | unsigned long eoi; | 660 | unsigned long eoi; |
660 | unsigned long type; | 661 | unsigned long type; |
661 | unsigned long polarity; | 662 | unsigned long polarity; |
662 | }; | 663 | }; |
663 | 664 | ||
664 | /** | 665 | /** |
665 | * struct irq_chip_type - Generic interrupt chip instance for a flow type | 666 | * struct irq_chip_type - Generic interrupt chip instance for a flow type |
666 | * @chip: The real interrupt chip which provides the callbacks | 667 | * @chip: The real interrupt chip which provides the callbacks |
667 | * @regs: Register offsets for this chip | 668 | * @regs: Register offsets for this chip |
668 | * @handler: Flow handler associated with this chip | 669 | * @handler: Flow handler associated with this chip |
669 | * @type: Chip can handle these flow types | 670 | * @type: Chip can handle these flow types |
670 | * @mask_cache_priv: Cached mask register private to the chip type | 671 | * @mask_cache_priv: Cached mask register private to the chip type |
671 | * @mask_cache: Pointer to cached mask register | 672 | * @mask_cache: Pointer to cached mask register |
672 | * | 673 | * |
673 | * An irq_chip_generic can have several instances of irq_chip_type when | 674 | * An irq_chip_generic can have several instances of irq_chip_type when |
674 | * it requires different functions and register offsets for different | 675 | * it requires different functions and register offsets for different |
675 | * flow types. | 676 | * flow types. |
676 | */ | 677 | */ |
677 | struct irq_chip_type { | 678 | struct irq_chip_type { |
678 | struct irq_chip chip; | 679 | struct irq_chip chip; |
679 | struct irq_chip_regs regs; | 680 | struct irq_chip_regs regs; |
680 | irq_flow_handler_t handler; | 681 | irq_flow_handler_t handler; |
681 | u32 type; | 682 | u32 type; |
682 | u32 mask_cache_priv; | 683 | u32 mask_cache_priv; |
683 | u32 *mask_cache; | 684 | u32 *mask_cache; |
684 | }; | 685 | }; |
685 | 686 | ||
686 | /** | 687 | /** |
687 | * struct irq_chip_generic - Generic irq chip data structure | 688 | * struct irq_chip_generic - Generic irq chip data structure |
688 | * @lock: Lock to protect register and cache data access | 689 | * @lock: Lock to protect register and cache data access |
689 | * @reg_base: Register base address (virtual) | 690 | * @reg_base: Register base address (virtual) |
690 | * @irq_base: Interrupt base nr for this chip | 691 | * @irq_base: Interrupt base nr for this chip |
691 | * @irq_cnt: Number of interrupts handled by this chip | 692 | * @irq_cnt: Number of interrupts handled by this chip |
692 | * @mask_cache: Cached mask register shared between all chip types | 693 | * @mask_cache: Cached mask register shared between all chip types |
693 | * @type_cache: Cached type register | 694 | * @type_cache: Cached type register |
694 | * @polarity_cache: Cached polarity register | 695 | * @polarity_cache: Cached polarity register |
695 | * @wake_enabled: Interrupt can wakeup from suspend | 696 | * @wake_enabled: Interrupt can wakeup from suspend |
696 | * @wake_active: Interrupt is marked as a wakeup from suspend source | 697 | * @wake_active: Interrupt is marked as a wakeup from suspend source |
697 | * @num_ct: Number of available irq_chip_type instances (usually 1) | 698 | * @num_ct: Number of available irq_chip_type instances (usually 1) |
698 | * @private: Private data for non generic chip callbacks | 699 | * @private: Private data for non generic chip callbacks |
699 | * @installed: bitfield to denote installed interrupts | 700 | * @installed: bitfield to denote installed interrupts |
700 | * @unused: bitfield to denote unused interrupts | 701 | * @unused: bitfield to denote unused interrupts |
701 | * @domain: irq domain pointer | 702 | * @domain: irq domain pointer |
702 | * @list: List head for keeping track of instances | 703 | * @list: List head for keeping track of instances |
703 | * @chip_types: Array of interrupt irq_chip_types | 704 | * @chip_types: Array of interrupt irq_chip_types |
704 | * | 705 | * |
705 | * Note that irq_chip_generic can have multiple irq_chip_type | 706 | * Note that irq_chip_generic can have multiple irq_chip_type |
706 | * implementations which can be associated with a particular irq line of | 707 | * implementations which can be associated with a particular irq line of |
707 | * an irq_chip_generic instance. That allows sharing and protecting | 708 | * an irq_chip_generic instance. That allows sharing and protecting |
708 | * state in an irq_chip_generic instance when we need to implement | 709 | * state in an irq_chip_generic instance when we need to implement |
709 | * different flow mechanisms (level/edge) for it. | 710 | * different flow mechanisms (level/edge) for it. |
710 | */ | 711 | */ |
711 | struct irq_chip_generic { | 712 | struct irq_chip_generic { |
712 | raw_spinlock_t lock; | 713 | raw_spinlock_t lock; |
713 | void __iomem *reg_base; | 714 | void __iomem *reg_base; |
714 | unsigned int irq_base; | 715 | unsigned int irq_base; |
715 | unsigned int irq_cnt; | 716 | unsigned int irq_cnt; |
716 | u32 mask_cache; | 717 | u32 mask_cache; |
717 | u32 type_cache; | 718 | u32 type_cache; |
718 | u32 polarity_cache; | 719 | u32 polarity_cache; |
719 | u32 wake_enabled; | 720 | u32 wake_enabled; |
720 | u32 wake_active; | 721 | u32 wake_active; |
721 | unsigned int num_ct; | 722 | unsigned int num_ct; |
722 | void *private; | 723 | void *private; |
723 | unsigned long installed; | 724 | unsigned long installed; |
724 | unsigned long unused; | 725 | unsigned long unused; |
725 | struct irq_domain *domain; | 726 | struct irq_domain *domain; |
726 | struct list_head list; | 727 | struct list_head list; |
727 | struct irq_chip_type chip_types[0]; | 728 | struct irq_chip_type chip_types[0]; |
728 | }; | 729 | }; |
729 | 730 | ||
730 | /** | 731 | /** |
731 | * enum irq_gc_flags - Initialization flags for generic irq chips | 732 | * enum irq_gc_flags - Initialization flags for generic irq chips |
732 | * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg | 733 | * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg |
733 | * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for | 734 | * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for |
734 | * irq chips which need to call irq_set_wake() on | 735 | * irq chips which need to call irq_set_wake() on |
735 | * the parent irq. Usually GPIO implementations | 736 | * the parent irq. Usually GPIO implementations |
736 | * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private | 737 | * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private |
737 | * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask | 738 | * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask |
738 | */ | 739 | */ |
739 | enum irq_gc_flags { | 740 | enum irq_gc_flags { |
740 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, | 741 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, |
741 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, | 742 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, |
742 | IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, | 743 | IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, |
743 | IRQ_GC_NO_MASK = 1 << 3, | 744 | IRQ_GC_NO_MASK = 1 << 3, |
744 | }; | 745 | }; |
745 | 746 | ||
746 | /* | 747 | /* |
747 | * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains | 748 | * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains |
748 | * @irqs_per_chip: Number of interrupts per chip | 749 | * @irqs_per_chip: Number of interrupts per chip |
749 | * @num_chips: Number of chips | 750 | * @num_chips: Number of chips |
750 | * @irq_flags_to_set: IRQ* flags to set on irq setup | 751 | * @irq_flags_to_set: IRQ* flags to set on irq setup |
751 | * @irq_flags_to_clear: IRQ* flags to clear on irq setup | 752 | * @irq_flags_to_clear: IRQ* flags to clear on irq setup |
752 | * @gc_flags: Generic chip specific setup flags | 753 | * @gc_flags: Generic chip specific setup flags |
753 | * @gc: Array of pointers to generic interrupt chips | 754 | * @gc: Array of pointers to generic interrupt chips |
754 | */ | 755 | */ |
755 | struct irq_domain_chip_generic { | 756 | struct irq_domain_chip_generic { |
756 | unsigned int irqs_per_chip; | 757 | unsigned int irqs_per_chip; |
757 | unsigned int num_chips; | 758 | unsigned int num_chips; |
758 | unsigned int irq_flags_to_clear; | 759 | unsigned int irq_flags_to_clear; |
759 | unsigned int irq_flags_to_set; | 760 | unsigned int irq_flags_to_set; |
760 | enum irq_gc_flags gc_flags; | 761 | enum irq_gc_flags gc_flags; |
761 | struct irq_chip_generic *gc[0]; | 762 | struct irq_chip_generic *gc[0]; |
762 | }; | 763 | }; |
763 | 764 | ||
764 | /* Generic chip callback functions */ | 765 | /* Generic chip callback functions */ |
765 | void irq_gc_noop(struct irq_data *d); | 766 | void irq_gc_noop(struct irq_data *d); |
766 | void irq_gc_mask_disable_reg(struct irq_data *d); | 767 | void irq_gc_mask_disable_reg(struct irq_data *d); |
767 | void irq_gc_mask_set_bit(struct irq_data *d); | 768 | void irq_gc_mask_set_bit(struct irq_data *d); |
768 | void irq_gc_mask_clr_bit(struct irq_data *d); | 769 | void irq_gc_mask_clr_bit(struct irq_data *d); |
769 | void irq_gc_unmask_enable_reg(struct irq_data *d); | 770 | void irq_gc_unmask_enable_reg(struct irq_data *d); |
770 | void irq_gc_ack_set_bit(struct irq_data *d); | 771 | void irq_gc_ack_set_bit(struct irq_data *d); |
771 | void irq_gc_ack_clr_bit(struct irq_data *d); | 772 | void irq_gc_ack_clr_bit(struct irq_data *d); |
772 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); | 773 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); |
773 | void irq_gc_eoi(struct irq_data *d); | 774 | void irq_gc_eoi(struct irq_data *d); |
774 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); | 775 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); |
775 | 776 | ||
776 | /* Setup functions for irq_chip_generic */ | 777 | /* Setup functions for irq_chip_generic */ |
777 | struct irq_chip_generic * | 778 | struct irq_chip_generic * |
778 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, | 779 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, |
779 | void __iomem *reg_base, irq_flow_handler_t handler); | 780 | void __iomem *reg_base, irq_flow_handler_t handler); |
780 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | 781 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, |
781 | enum irq_gc_flags flags, unsigned int clr, | 782 | enum irq_gc_flags flags, unsigned int clr, |
782 | unsigned int set); | 783 | unsigned int set); |
783 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); | 784 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); |
784 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | 785 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, |
785 | unsigned int clr, unsigned int set); | 786 | unsigned int clr, unsigned int set); |
786 | 787 | ||
787 | struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); | 788 | struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); |
788 | int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, | 789 | int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, |
789 | int num_ct, const char *name, | 790 | int num_ct, const char *name, |
790 | irq_flow_handler_t handler, | 791 | irq_flow_handler_t handler, |
791 | unsigned int clr, unsigned int set, | 792 | unsigned int clr, unsigned int set, |
792 | enum irq_gc_flags flags); | 793 | enum irq_gc_flags flags); |
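Putting the two structures together, a minimal non-domain setup might look like the following sketch; the controller name, register offset, and irq count are assumptions, not taken from any driver, and IRQ_MSK() is the mask macro defined just below:

#include <linux/errno.h>
#include <linux/irq.h>

static int my_init_gc(void __iomem *reg_base, unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MYINTC", 1, irq_base, reg_base,
				    handle_level_irq);
	if (!gc)
		return -ENOMEM;

	ct = gc->chip_types;
	ct->chip.irq_mask	= irq_gc_mask_set_bit;
	ct->chip.irq_unmask	= irq_gc_mask_clr_bit;
	ct->regs.mask		= 0x04;	/* hypothetical mask register */

	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
	return 0;
}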
793 | 794 | ||
794 | 795 | ||
795 | static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) | 796 | static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) |
796 | { | 797 | { |
797 | return container_of(d->chip, struct irq_chip_type, chip); | 798 | return container_of(d->chip, struct irq_chip_type, chip); |
798 | } | 799 | } |
799 | 800 | ||
800 | #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) | 801 | #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) |
801 | 802 | ||
802 | #ifdef CONFIG_SMP | 803 | #ifdef CONFIG_SMP |
803 | static inline void irq_gc_lock(struct irq_chip_generic *gc) | 804 | static inline void irq_gc_lock(struct irq_chip_generic *gc) |
804 | { | 805 | { |
805 | raw_spin_lock(&gc->lock); | 806 | raw_spin_lock(&gc->lock); |
806 | } | 807 | } |
807 | 808 | ||
808 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) | 809 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) |
809 | { | 810 | { |
810 | raw_spin_unlock(&gc->lock); | 811 | raw_spin_unlock(&gc->lock); |
811 | } | 812 | } |
812 | #else | 813 | #else |
813 | static inline void irq_gc_lock(struct irq_chip_generic *gc) { } | 814 | static inline void irq_gc_lock(struct irq_chip_generic *gc) { } |
814 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } | 815 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } |
815 | #endif | 816 | #endif |
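Chip callbacks that touch the shared caches are expected to take gc->lock through these helpers. A sketch of that pattern, with an assumed register offset:

static void my_gc_mask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);

	irq_gc_lock(gc);
	gc->mask_cache |= d->mask;
	irq_reg_writel(gc->mask_cache, gc->reg_base + 0x04 /* assumed */);
	irq_gc_unlock(gc);
}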
816 | 817 | ||
817 | #endif /* _LINUX_IRQ_H */ | 818 | #endif /* _LINUX_IRQ_H */ |
818 | 819 |
kernel/irq/manage.c
1 | /* | 1 | /* |
2 | * linux/kernel/irq/manage.c | 2 | * linux/kernel/irq/manage.c |
3 | * | 3 | * |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | 4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
5 | * Copyright (C) 2005-2006 Thomas Gleixner | 5 | * Copyright (C) 2005-2006 Thomas Gleixner |
6 | * | 6 | * |
7 | * This file contains driver APIs to the irq subsystem. | 7 | * This file contains driver APIs to the irq subsystem. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define pr_fmt(fmt) "genirq: " fmt | 10 | #define pr_fmt(fmt) "genirq: " fmt |
11 | 11 | ||
12 | #include <linux/irq.h> | 12 | #include <linux/irq.h> |
13 | #include <linux/kthread.h> | 13 | #include <linux/kthread.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/sched/rt.h> | 19 | #include <linux/sched/rt.h> |
20 | #include <linux/task_work.h> | 20 | #include <linux/task_work.h> |
21 | 21 | ||
22 | #include "internals.h" | 22 | #include "internals.h" |
23 | 23 | ||
24 | #ifdef CONFIG_IRQ_FORCED_THREADING | 24 | #ifdef CONFIG_IRQ_FORCED_THREADING |
25 | __read_mostly bool force_irqthreads; | 25 | __read_mostly bool force_irqthreads; |
26 | 26 | ||
27 | static int __init setup_forced_irqthreads(char *arg) | 27 | static int __init setup_forced_irqthreads(char *arg) |
28 | { | 28 | { |
29 | force_irqthreads = true; | 29 | force_irqthreads = true; |
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
32 | early_param("threadirqs", setup_forced_irqthreads); | 32 | early_param("threadirqs", setup_forced_irqthreads); |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | static void __synchronize_hardirq(struct irq_desc *desc) | 35 | static void __synchronize_hardirq(struct irq_desc *desc) |
36 | { | 36 | { |
37 | bool inprogress; | 37 | bool inprogress; |
38 | 38 | ||
39 | do { | 39 | do { |
40 | unsigned long flags; | 40 | unsigned long flags; |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Wait until we're out of the critical section. This might | 43 | * Wait until we're out of the critical section. This might |
44 | * give the wrong answer due to the lack of memory barriers. | 44 | * give the wrong answer due to the lack of memory barriers. |
45 | */ | 45 | */ |
46 | while (irqd_irq_inprogress(&desc->irq_data)) | 46 | while (irqd_irq_inprogress(&desc->irq_data)) |
47 | cpu_relax(); | 47 | cpu_relax(); |
48 | 48 | ||
49 | /* Ok, that indicated we're done: double-check carefully. */ | 49 | /* Ok, that indicated we're done: double-check carefully. */ |
50 | raw_spin_lock_irqsave(&desc->lock, flags); | 50 | raw_spin_lock_irqsave(&desc->lock, flags); |
51 | inprogress = irqd_irq_inprogress(&desc->irq_data); | 51 | inprogress = irqd_irq_inprogress(&desc->irq_data); |
52 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 52 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
53 | 53 | ||
54 | /* Oops, that failed? */ | 54 | /* Oops, that failed? */ |
55 | } while (inprogress); | 55 | } while (inprogress); |
56 | } | 56 | } |
57 | 57 | ||
58 | /** | 58 | /** |
59 | * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) | 59 | * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) |
60 | * @irq: interrupt number to wait for | 60 | * @irq: interrupt number to wait for |
61 | * | 61 | * |
62 | * This function waits for any pending hard IRQ handlers for this | 62 | * This function waits for any pending hard IRQ handlers for this |
63 | * interrupt to complete before returning. If you use this | 63 | * interrupt to complete before returning. If you use this |
64 | * function while holding a resource the IRQ handler may need, you | 64 | * function while holding a resource the IRQ handler may need, you |
65 | * will deadlock. It does not take associated threaded handlers | 65 | * will deadlock. It does not take associated threaded handlers |
66 | * into account. | 66 | * into account. |
67 | * | 67 | * |
68 | * Do not use this for shutdown scenarios where you must be sure | 68 | * Do not use this for shutdown scenarios where you must be sure |
69 | * that all parts (hardirq and threaded handler) have completed. | 69 | * that all parts (hardirq and threaded handler) have completed. |
70 | * | 70 | * |
71 | * This function may be called - with care - from IRQ context. | 71 | * This function may be called - with care - from IRQ context. |
72 | */ | 72 | */ |
73 | void synchronize_hardirq(unsigned int irq) | 73 | void synchronize_hardirq(unsigned int irq) |
74 | { | 74 | { |
75 | struct irq_desc *desc = irq_to_desc(irq); | 75 | struct irq_desc *desc = irq_to_desc(irq); |
76 | 76 | ||
77 | if (desc) | 77 | if (desc) |
78 | __synchronize_hardirq(desc); | 78 | __synchronize_hardirq(desc); |
79 | } | 79 | } |
80 | EXPORT_SYMBOL(synchronize_hardirq); | 80 | EXPORT_SYMBOL(synchronize_hardirq); |
81 | 81 | ||
82 | /** | 82 | /** |
83 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 83 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
84 | * @irq: interrupt number to wait for | 84 | * @irq: interrupt number to wait for |
85 | * | 85 | * |
86 | * This function waits for any pending IRQ handlers for this interrupt | 86 | * This function waits for any pending IRQ handlers for this interrupt |
87 | * to complete before returning. If you use this function while | 87 | * to complete before returning. If you use this function while |
88 | * holding a resource the IRQ handler may need, you will deadlock. | 88 | * holding a resource the IRQ handler may need, you will deadlock. |
89 | * | 89 | * |
90 | * This function may be called - with care - from IRQ context. | 90 | * This function may be called - with care - from IRQ context. |
91 | */ | 91 | */ |
92 | void synchronize_irq(unsigned int irq) | 92 | void synchronize_irq(unsigned int irq) |
93 | { | 93 | { |
94 | struct irq_desc *desc = irq_to_desc(irq); | 94 | struct irq_desc *desc = irq_to_desc(irq); |
95 | 95 | ||
96 | if (desc) { | 96 | if (desc) { |
97 | __synchronize_hardirq(desc); | 97 | __synchronize_hardirq(desc); |
98 | /* | 98 | /* |
99 | * We made sure that no hardirq handler is | 99 | * We made sure that no hardirq handler is |
100 | * running. Now verify that no threaded handlers are | 100 | * running. Now verify that no threaded handlers are |
101 | * active. | 101 | * active. |
102 | */ | 102 | */ |
103 | wait_event(desc->wait_for_threads, | 103 | wait_event(desc->wait_for_threads, |
104 | !atomic_read(&desc->threads_active)); | 104 | !atomic_read(&desc->threads_active)); |
105 | } | 105 | } |
106 | } | 106 | } |
107 | EXPORT_SYMBOL(synchronize_irq); | 107 | EXPORT_SYMBOL(synchronize_irq); |
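The canonical consumer-side use of the guarantee documented above is teardown ordering; the my_dev structure and its flag below are stand-ins for illustration only:

#include <linux/interrupt.h>
#include <linux/slab.h>

struct my_dev { unsigned int irq; bool shutting_down; void *dma_buf; };

static void my_dev_remove(struct my_dev *dev)
{
	dev->shutting_down = true;	/* handler checks this and bails */
	synchronize_irq(dev->irq);	/* handlers started earlier are done */
	free_irq(dev->irq, dev);
	kfree(dev->dma_buf);		/* nothing can touch it anymore */
}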
108 | 108 | ||
109 | #ifdef CONFIG_SMP | 109 | #ifdef CONFIG_SMP |
110 | cpumask_var_t irq_default_affinity; | 110 | cpumask_var_t irq_default_affinity; |
111 | 111 | ||
112 | /** | 112 | /** |
113 | * irq_can_set_affinity - Check if the affinity of a given irq can be set | 113 | * irq_can_set_affinity - Check if the affinity of a given irq can be set |
114 | * @irq: Interrupt to check | 114 | * @irq: Interrupt to check |
115 | * | 115 | * |
116 | */ | 116 | */ |
117 | int irq_can_set_affinity(unsigned int irq) | 117 | int irq_can_set_affinity(unsigned int irq) |
118 | { | 118 | { |
119 | struct irq_desc *desc = irq_to_desc(irq); | 119 | struct irq_desc *desc = irq_to_desc(irq); |
120 | 120 | ||
121 | if (!desc || !irqd_can_balance(&desc->irq_data) || | 121 | if (!desc || !irqd_can_balance(&desc->irq_data) || |
122 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) | 122 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) |
123 | return 0; | 123 | return 0; |
124 | 124 | ||
125 | return 1; | 125 | return 1; |
126 | } | 126 | } |
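Sketch of the intended use (hypothetical caller): probe the line before applying an affinity setting, since PER_CPU and NO_BALANCE interrupts reject it:

static int mydrv_pin_irq(unsigned int irq, unsigned int cpu)
{
	if (!irq_can_set_affinity(irq))
		return -EINVAL;
	return irq_set_affinity(irq, cpumask_of(cpu));
}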
127 | 127 | ||
128 | /** | 128 | /** |
129 | * irq_set_thread_affinity - Notify irq threads to adjust affinity | 129 | * irq_set_thread_affinity - Notify irq threads to adjust affinity |
130 | * @desc: irq descriptor whose affinity has changed | 130 | * @desc: irq descriptor whose affinity has changed |
131 | * | 131 | * |
132 | * We just set IRQTF_AFFINITY and delegate the affinity setting | 132 | * We just set IRQTF_AFFINITY and delegate the affinity setting |
133 | * to the interrupt thread itself. We cannot call | 133 | * to the interrupt thread itself. We cannot call |
134 | * set_cpus_allowed_ptr() here as we hold desc->lock and this | 134 | * set_cpus_allowed_ptr() here as we hold desc->lock and this |
135 | * code can be called from hard interrupt context. | 135 | * code can be called from hard interrupt context. |
136 | */ | 136 | */ |
137 | void irq_set_thread_affinity(struct irq_desc *desc) | 137 | void irq_set_thread_affinity(struct irq_desc *desc) |
138 | { | 138 | { |
139 | struct irqaction *action = desc->action; | 139 | struct irqaction *action = desc->action; |
140 | 140 | ||
141 | while (action) { | 141 | while (action) { |
142 | if (action->thread) | 142 | if (action->thread) |
143 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | 143 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
144 | action = action->next; | 144 | action = action->next; |
145 | } | 145 | } |
146 | } | 146 | } |
147 | 147 | ||
148 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 148 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
149 | static inline bool irq_can_move_pcntxt(struct irq_data *data) | 149 | static inline bool irq_can_move_pcntxt(struct irq_data *data) |
150 | { | 150 | { |
151 | return irqd_can_move_in_process_context(data); | 151 | return irqd_can_move_in_process_context(data); |
152 | } | 152 | } |
153 | static inline bool irq_move_pending(struct irq_data *data) | 153 | static inline bool irq_move_pending(struct irq_data *data) |
154 | { | 154 | { |
155 | return irqd_is_setaffinity_pending(data); | 155 | return irqd_is_setaffinity_pending(data); |
156 | } | 156 | } |
157 | static inline void | 157 | static inline void |
158 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | 158 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) |
159 | { | 159 | { |
160 | cpumask_copy(desc->pending_mask, mask); | 160 | cpumask_copy(desc->pending_mask, mask); |
161 | } | 161 | } |
162 | static inline void | 162 | static inline void |
163 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | 163 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) |
164 | { | 164 | { |
165 | cpumask_copy(mask, desc->pending_mask); | 165 | cpumask_copy(mask, desc->pending_mask); |
166 | } | 166 | } |
167 | #else | 167 | #else |
168 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } | 168 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } |
169 | static inline bool irq_move_pending(struct irq_data *data) { return false; } | 169 | static inline bool irq_move_pending(struct irq_data *data) { return false; } |
170 | static inline void | 170 | static inline void |
171 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | 171 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } |
172 | static inline void | 172 | static inline void |
173 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | 173 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } |
174 | #endif | 174 | #endif |
175 | 175 | ||
176 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | 176 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, |
177 | bool force) | 177 | bool force) |
178 | { | 178 | { |
179 | struct irq_desc *desc = irq_data_to_desc(data); | 179 | struct irq_desc *desc = irq_data_to_desc(data); |
180 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 180 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
181 | int ret; | 181 | int ret; |
182 | 182 | ||
183 | ret = chip->irq_set_affinity(data, mask, false); | 183 | ret = chip->irq_set_affinity(data, mask, force); |
184 | switch (ret) { | 184 | switch (ret) { |
185 | case IRQ_SET_MASK_OK: | 185 | case IRQ_SET_MASK_OK: |
186 | cpumask_copy(data->affinity, mask); | 186 | cpumask_copy(data->affinity, mask); |
187 | case IRQ_SET_MASK_OK_NOCOPY: | 187 | case IRQ_SET_MASK_OK_NOCOPY: |
188 | irq_set_thread_affinity(desc); | 188 | irq_set_thread_affinity(desc); |
189 | ret = 0; | 189 | ret = 0; |
190 | } | 190 | } |
191 | 191 | ||
192 | return ret; | 192 | return ret; |
193 | } | 193 | } |
194 | 194 | ||
195 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) | 195 | int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, |
196 | bool force) | ||
196 | { | 197 | { |
197 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 198 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
198 | struct irq_desc *desc = irq_data_to_desc(data); | 199 | struct irq_desc *desc = irq_data_to_desc(data); |
199 | int ret = 0; | 200 | int ret = 0; |
200 | 201 | ||
201 | if (!chip || !chip->irq_set_affinity) | 202 | if (!chip || !chip->irq_set_affinity) |
202 | return -EINVAL; | 203 | return -EINVAL; |
203 | 204 | ||
204 | if (irq_can_move_pcntxt(data)) { | 205 | if (irq_can_move_pcntxt(data)) { |
205 | ret = irq_do_set_affinity(data, mask, false); | 206 | ret = irq_do_set_affinity(data, mask, force); |
206 | } else { | 207 | } else { |
207 | irqd_set_move_pending(data); | 208 | irqd_set_move_pending(data); |
208 | irq_copy_pending(desc, mask); | 209 | irq_copy_pending(desc, mask); |
209 | } | 210 | } |
210 | 211 | ||
211 | if (desc->affinity_notify) { | 212 | if (desc->affinity_notify) { |
212 | kref_get(&desc->affinity_notify->kref); | 213 | kref_get(&desc->affinity_notify->kref); |
213 | schedule_work(&desc->affinity_notify->work); | 214 | schedule_work(&desc->affinity_notify->work); |
214 | } | 215 | } |
215 | irqd_set(data, IRQD_AFFINITY_SET); | 216 | irqd_set(data, IRQD_AFFINITY_SET); |
216 | 217 | ||
217 | return ret; | 218 | return ret; |
218 | } | 219 | } |
219 | 220 | ||
220 | /** | 221 | int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) |
221 | * irq_set_affinity - Set the irq affinity of a given irq | ||
222 | * @irq: Interrupt to set affinity | ||
223 | * @mask: cpumask | ||
224 | * | ||
225 | */ | ||
226 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) | ||
227 | { | 222 | { |
228 | struct irq_desc *desc = irq_to_desc(irq); | 223 | struct irq_desc *desc = irq_to_desc(irq); |
229 | unsigned long flags; | 224 | unsigned long flags; |
230 | int ret; | 225 | int ret; |
231 | 226 | ||
232 | if (!desc) | 227 | if (!desc) |
233 | return -EINVAL; | 228 | return -EINVAL; |
234 | 229 | ||
235 | raw_spin_lock_irqsave(&desc->lock, flags); | 230 | raw_spin_lock_irqsave(&desc->lock, flags); |
236 | ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask); | 231 | ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); |
237 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 232 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
238 | return ret; | 233 | return ret; |
239 | } | 234 | } |
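Both public entry points added by this series are thin wrappers in <linux/interrupt.h>: irq_set_affinity() passes force == false, irq_force_affinity() passes force == true. A sketch of the CPU-bringup case the force variant exists for (illustrative driver code, not part of this file):

static void mytimer_percpu_setup(unsigned int irq, unsigned int cpu)
{
	/*
	 * Called on the starting CPU before it is marked online, so a
	 * plain irq_set_affinity() would be rejected by the online-mask
	 * check in the irq chip; force the affinity instead.
	 */
	irq_force_affinity(irq, cpumask_of(cpu));
}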
240 | 235 | ||
241 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | 236 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) |
242 | { | 237 | { |
243 | unsigned long flags; | 238 | unsigned long flags; |
244 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); | 239 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
245 | 240 | ||
246 | if (!desc) | 241 | if (!desc) |
247 | return -EINVAL; | 242 | return -EINVAL; |
248 | desc->affinity_hint = m; | 243 | desc->affinity_hint = m; |
249 | irq_put_desc_unlock(desc, flags); | 244 | irq_put_desc_unlock(desc, flags); |
250 | return 0; | 245 | return 0; |
251 | } | 246 | } |
252 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | 247 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); |
253 | 248 | ||
254 | static void irq_affinity_notify(struct work_struct *work) | 249 | static void irq_affinity_notify(struct work_struct *work) |
255 | { | 250 | { |
256 | struct irq_affinity_notify *notify = | 251 | struct irq_affinity_notify *notify = |
257 | container_of(work, struct irq_affinity_notify, work); | 252 | container_of(work, struct irq_affinity_notify, work); |
258 | struct irq_desc *desc = irq_to_desc(notify->irq); | 253 | struct irq_desc *desc = irq_to_desc(notify->irq); |
259 | cpumask_var_t cpumask; | 254 | cpumask_var_t cpumask; |
260 | unsigned long flags; | 255 | unsigned long flags; |
261 | 256 | ||
262 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) | 257 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) |
263 | goto out; | 258 | goto out; |
264 | 259 | ||
265 | raw_spin_lock_irqsave(&desc->lock, flags); | 260 | raw_spin_lock_irqsave(&desc->lock, flags); |
266 | if (irq_move_pending(&desc->irq_data)) | 261 | if (irq_move_pending(&desc->irq_data)) |
267 | irq_get_pending(cpumask, desc); | 262 | irq_get_pending(cpumask, desc); |
268 | else | 263 | else |
269 | cpumask_copy(cpumask, desc->irq_data.affinity); | 264 | cpumask_copy(cpumask, desc->irq_data.affinity); |
270 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 265 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
271 | 266 | ||
272 | notify->notify(notify, cpumask); | 267 | notify->notify(notify, cpumask); |
273 | 268 | ||
274 | free_cpumask_var(cpumask); | 269 | free_cpumask_var(cpumask); |
275 | out: | 270 | out: |
276 | kref_put(¬ify->kref, notify->release); | 271 | kref_put(¬ify->kref, notify->release); |
277 | } | 272 | } |
278 | 273 | ||
279 | /** | 274 | /** |
280 | * irq_set_affinity_notifier - control notification of IRQ affinity changes | 275 | * irq_set_affinity_notifier - control notification of IRQ affinity changes |
281 | * @irq: Interrupt for which to enable/disable notification | 276 | * @irq: Interrupt for which to enable/disable notification |
282 | * @notify: Context for notification, or %NULL to disable | 277 | * @notify: Context for notification, or %NULL to disable |
283 | * notification. Function pointers must be initialised; | 278 | * notification. Function pointers must be initialised; |
284 | * the other fields will be initialised by this function. | 279 | * the other fields will be initialised by this function. |
285 | * | 280 | * |
286 | * Must be called in process context. Notification may only be enabled | 281 | * Must be called in process context. Notification may only be enabled |
287 | * after the IRQ is allocated and must be disabled before the IRQ is | 282 | * after the IRQ is allocated and must be disabled before the IRQ is |
288 | * freed using free_irq(). | 283 | * freed using free_irq(). |
289 | */ | 284 | */ |
290 | int | 285 | int |
291 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | 286 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) |
292 | { | 287 | { |
293 | struct irq_desc *desc = irq_to_desc(irq); | 288 | struct irq_desc *desc = irq_to_desc(irq); |
294 | struct irq_affinity_notify *old_notify; | 289 | struct irq_affinity_notify *old_notify; |
295 | unsigned long flags; | 290 | unsigned long flags; |
296 | 291 | ||
297 | /* The release function is promised process context */ | 292 | /* The release function is promised process context */ |
298 | might_sleep(); | 293 | might_sleep(); |
299 | 294 | ||
300 | if (!desc) | 295 | if (!desc) |
301 | return -EINVAL; | 296 | return -EINVAL; |
302 | 297 | ||
303 | /* Complete initialisation of *notify */ | 298 | /* Complete initialisation of *notify */ |
304 | if (notify) { | 299 | if (notify) { |
305 | notify->irq = irq; | 300 | notify->irq = irq; |
306 | kref_init(¬ify->kref); | 301 | kref_init(¬ify->kref); |
307 | INIT_WORK(¬ify->work, irq_affinity_notify); | 302 | INIT_WORK(¬ify->work, irq_affinity_notify); |
308 | } | 303 | } |
309 | 304 | ||
310 | raw_spin_lock_irqsave(&desc->lock, flags); | 305 | raw_spin_lock_irqsave(&desc->lock, flags); |
311 | old_notify = desc->affinity_notify; | 306 | old_notify = desc->affinity_notify; |
312 | desc->affinity_notify = notify; | 307 | desc->affinity_notify = notify; |
313 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 308 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
314 | 309 | ||
315 | if (old_notify) | 310 | if (old_notify) |
316 | kref_put(&old_notify->kref, old_notify->release); | 311 | kref_put(&old_notify->kref, old_notify->release); |
317 | 312 | ||
318 | return 0; | 313 | return 0; |
319 | } | 314 | } |
320 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); | 315 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); |
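A registration sketch (all myq_* names hypothetical): notify() runs from a workqueue, and release() is called once the last kref reference is dropped:

static void myq_notify(struct irq_affinity_notify *notify,
		       const cpumask_t *mask)
{
	/* re-steer per-queue resources to the new cpu set */
}

static void myq_release(struct kref *ref)
{
	/* drop whatever the notifier pinned */
}

	q->affinity_notify.notify  = myq_notify;
	q->affinity_notify.release = myq_release;
	irq_set_affinity_notifier(irq, &q->affinity_notify);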
321 | 316 | ||
322 | #ifndef CONFIG_AUTO_IRQ_AFFINITY | 317 | #ifndef CONFIG_AUTO_IRQ_AFFINITY |
323 | /* | 318 | /* |
324 | * Generic version of the affinity autoselector. | 319 | * Generic version of the affinity autoselector. |
325 | */ | 320 | */ |
326 | static int | 321 | static int |
327 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | 322 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) |
328 | { | 323 | { |
329 | struct cpumask *set = irq_default_affinity; | 324 | struct cpumask *set = irq_default_affinity; |
330 | int node = desc->irq_data.node; | 325 | int node = desc->irq_data.node; |
331 | 326 | ||
332 | /* Excludes PER_CPU and NO_BALANCE interrupts */ | 327 | /* Excludes PER_CPU and NO_BALANCE interrupts */ |
333 | if (!irq_can_set_affinity(irq)) | 328 | if (!irq_can_set_affinity(irq)) |
334 | return 0; | 329 | return 0; |
335 | 330 | ||
336 | /* | 331 | /* |
337 | * Preserve a userspace affinity setup, but make sure that | 332 | * Preserve a userspace affinity setup, but make sure that |
338 | * one of the targets is online. | 333 | * one of the targets is online. |
339 | */ | 334 | */ |
340 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { | 335 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { |
341 | if (cpumask_intersects(desc->irq_data.affinity, | 336 | if (cpumask_intersects(desc->irq_data.affinity, |
342 | cpu_online_mask)) | 337 | cpu_online_mask)) |
343 | set = desc->irq_data.affinity; | 338 | set = desc->irq_data.affinity; |
344 | else | 339 | else |
345 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); | 340 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); |
346 | } | 341 | } |
347 | 342 | ||
348 | cpumask_and(mask, cpu_online_mask, set); | 343 | cpumask_and(mask, cpu_online_mask, set); |
349 | if (node != NUMA_NO_NODE) { | 344 | if (node != NUMA_NO_NODE) { |
350 | const struct cpumask *nodemask = cpumask_of_node(node); | 345 | const struct cpumask *nodemask = cpumask_of_node(node); |
351 | 346 | ||
352 | /* make sure at least one of the cpus in nodemask is online */ | 347 | /* make sure at least one of the cpus in nodemask is online */ |
353 | if (cpumask_intersects(mask, nodemask)) | 348 | if (cpumask_intersects(mask, nodemask)) |
354 | cpumask_and(mask, mask, nodemask); | 349 | cpumask_and(mask, mask, nodemask); |
355 | } | 350 | } |
356 | irq_do_set_affinity(&desc->irq_data, mask, false); | 351 | irq_do_set_affinity(&desc->irq_data, mask, false); |
357 | return 0; | 352 | return 0; |
358 | } | 353 | } |
359 | #else | 354 | #else |
360 | static inline int | 355 | static inline int |
361 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) | 356 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) |
362 | { | 357 | { |
363 | return irq_select_affinity(irq); | 358 | return irq_select_affinity(irq); |
364 | } | 359 | } |
365 | #endif | 360 | #endif |
366 | 361 | ||
367 | /* | 362 | /* |
368 | * Called when affinity is set via /proc/irq | 363 | * Called when affinity is set via /proc/irq |
369 | */ | 364 | */ |
370 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) | 365 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) |
371 | { | 366 | { |
372 | struct irq_desc *desc = irq_to_desc(irq); | 367 | struct irq_desc *desc = irq_to_desc(irq); |
373 | unsigned long flags; | 368 | unsigned long flags; |
374 | int ret; | 369 | int ret; |
375 | 370 | ||
376 | raw_spin_lock_irqsave(&desc->lock, flags); | 371 | raw_spin_lock_irqsave(&desc->lock, flags); |
377 | ret = setup_affinity(irq, desc, mask); | 372 | ret = setup_affinity(irq, desc, mask); |
378 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 373 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
379 | return ret; | 374 | return ret; |
380 | } | 375 | } |
381 | 376 | ||
382 | #else | 377 | #else |
383 | static inline int | 378 | static inline int |
384 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | 379 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) |
385 | { | 380 | { |
386 | return 0; | 381 | return 0; |
387 | } | 382 | } |
388 | #endif | 383 | #endif |
389 | 384 | ||
390 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | 385 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) |
391 | { | 386 | { |
392 | if (suspend) { | 387 | if (suspend) { |
393 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) | 388 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) |
394 | return; | 389 | return; |
395 | desc->istate |= IRQS_SUSPENDED; | 390 | desc->istate |= IRQS_SUSPENDED; |
396 | } | 391 | } |
397 | 392 | ||
398 | if (!desc->depth++) | 393 | if (!desc->depth++) |
399 | irq_disable(desc); | 394 | irq_disable(desc); |
400 | } | 395 | } |
401 | 396 | ||
402 | static int __disable_irq_nosync(unsigned int irq) | 397 | static int __disable_irq_nosync(unsigned int irq) |
403 | { | 398 | { |
404 | unsigned long flags; | 399 | unsigned long flags; |
405 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); | 400 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
406 | 401 | ||
407 | if (!desc) | 402 | if (!desc) |
408 | return -EINVAL; | 403 | return -EINVAL; |
409 | __disable_irq(desc, irq, false); | 404 | __disable_irq(desc, irq, false); |
410 | irq_put_desc_busunlock(desc, flags); | 405 | irq_put_desc_busunlock(desc, flags); |
411 | return 0; | 406 | return 0; |
412 | } | 407 | } |
413 | 408 | ||
414 | /** | 409 | /** |
415 | * disable_irq_nosync - disable an irq without waiting | 410 | * disable_irq_nosync - disable an irq without waiting |
416 | * @irq: Interrupt to disable | 411 | * @irq: Interrupt to disable |
417 | * | 412 | * |
418 | * Disable the selected interrupt line. Disables and Enables are | 413 | * Disable the selected interrupt line. Disables and Enables are |
419 | * nested. | 414 | * nested. |
420 | * Unlike disable_irq(), this function does not ensure existing | 415 | * Unlike disable_irq(), this function does not ensure existing |
421 | * instances of the IRQ handler have completed before returning. | 416 | * instances of the IRQ handler have completed before returning. |
422 | * | 417 | * |
423 | * This function may be called from IRQ context. | 418 | * This function may be called from IRQ context. |
424 | */ | 419 | */ |
425 | void disable_irq_nosync(unsigned int irq) | 420 | void disable_irq_nosync(unsigned int irq) |
426 | { | 421 | { |
427 | __disable_irq_nosync(irq); | 422 | __disable_irq_nosync(irq); |
428 | } | 423 | } |
429 | EXPORT_SYMBOL(disable_irq_nosync); | 424 | EXPORT_SYMBOL(disable_irq_nosync); |
430 | 425 | ||
431 | /** | 426 | /** |
432 | * disable_irq - disable an irq and wait for completion | 427 | * disable_irq - disable an irq and wait for completion |
433 | * @irq: Interrupt to disable | 428 | * @irq: Interrupt to disable |
434 | * | 429 | * |
435 | * Disable the selected interrupt line. Enables and Disables are | 430 | * Disable the selected interrupt line. Enables and Disables are |
436 | * nested. | 431 | * nested. |
437 | * This function waits for any pending IRQ handlers for this interrupt | 432 | * This function waits for any pending IRQ handlers for this interrupt |
438 | * to complete before returning. If you use this function while | 433 | * to complete before returning. If you use this function while |
439 | * holding a resource the IRQ handler may need, you will deadlock. | 434 | * holding a resource the IRQ handler may need, you will deadlock. |
440 | * | 435 | * |
441 | * This function may be called - with care - from IRQ context. | 436 | * This function may be called - with care - from IRQ context. |
442 | */ | 437 | */ |
443 | void disable_irq(unsigned int irq) | 438 | void disable_irq(unsigned int irq) |
444 | { | 439 | { |
445 | if (!__disable_irq_nosync(irq)) | 440 | if (!__disable_irq_nosync(irq)) |
446 | synchronize_irq(irq); | 441 | synchronize_irq(irq); |
447 | } | 442 | } |
448 | EXPORT_SYMBOL(disable_irq); | 443 | EXPORT_SYMBOL(disable_irq); |
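Typical pattern (hypothetical device): fence the handler out while reprogramming state it shares with process context; this is deadlock-free only because no lock the handler needs is held across the call:

	disable_irq(md->irq);
	mydev_reprogram(md);	/* handler cannot run concurrently */
	enable_irq(md->irq);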
449 | 444 | ||
450 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 445 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
451 | { | 446 | { |
452 | if (resume) { | 447 | if (resume) { |
453 | if (!(desc->istate & IRQS_SUSPENDED)) { | 448 | if (!(desc->istate & IRQS_SUSPENDED)) { |
454 | if (!desc->action) | 449 | if (!desc->action) |
455 | return; | 450 | return; |
456 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | 451 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) |
457 | return; | 452 | return; |
458 | /* Pretend that it got disabled! */ | 453 | /* Pretend that it got disabled! */ |
459 | desc->depth++; | 454 | desc->depth++; |
460 | } | 455 | } |
461 | desc->istate &= ~IRQS_SUSPENDED; | 456 | desc->istate &= ~IRQS_SUSPENDED; |
462 | } | 457 | } |
463 | 458 | ||
464 | switch (desc->depth) { | 459 | switch (desc->depth) { |
465 | case 0: | 460 | case 0: |
466 | err_out: | 461 | err_out: |
467 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); | 462 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); |
468 | break; | 463 | break; |
469 | case 1: { | 464 | case 1: { |
470 | if (desc->istate & IRQS_SUSPENDED) | 465 | if (desc->istate & IRQS_SUSPENDED) |
471 | goto err_out; | 466 | goto err_out; |
472 | /* Prevent probing on this irq: */ | 467 | /* Prevent probing on this irq: */ |
473 | irq_settings_set_noprobe(desc); | 468 | irq_settings_set_noprobe(desc); |
474 | irq_enable(desc); | 469 | irq_enable(desc); |
475 | check_irq_resend(desc, irq); | 470 | check_irq_resend(desc, irq); |
476 | /* fall-through */ | 471 | /* fall-through */ |
477 | } | 472 | } |
478 | default: | 473 | default: |
479 | desc->depth--; | 474 | desc->depth--; |
480 | } | 475 | } |
481 | } | 476 | } |
482 | 477 | ||
483 | /** | 478 | /** |
484 | * enable_irq - enable handling of an irq | 479 | * enable_irq - enable handling of an irq |
485 | * @irq: Interrupt to enable | 480 | * @irq: Interrupt to enable |
486 | * | 481 | * |
487 | * Undoes the effect of one call to disable_irq(). If this | 482 | * Undoes the effect of one call to disable_irq(). If this |
488 | * matches the last disable, processing of interrupts on this | 483 | * matches the last disable, processing of interrupts on this |
489 | * IRQ line is re-enabled. | 484 | * IRQ line is re-enabled. |
490 | * | 485 | * |
491 | * This function may be called from IRQ context only when | 486 | * This function may be called from IRQ context only when |
492 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! | 487 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! |
493 | */ | 488 | */ |
494 | void enable_irq(unsigned int irq) | 489 | void enable_irq(unsigned int irq) |
495 | { | 490 | { |
496 | unsigned long flags; | 491 | unsigned long flags; |
497 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); | 492 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
498 | 493 | ||
499 | if (!desc) | 494 | if (!desc) |
500 | return; | 495 | return; |
501 | if (WARN(!desc->irq_data.chip, | 496 | if (WARN(!desc->irq_data.chip, |
502 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | 497 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) |
503 | goto out; | 498 | goto out; |
504 | 499 | ||
505 | __enable_irq(desc, irq, false); | 500 | __enable_irq(desc, irq, false); |
506 | out: | 501 | out: |
507 | irq_put_desc_busunlock(desc, flags); | 502 | irq_put_desc_busunlock(desc, flags); |
508 | } | 503 | } |
509 | EXPORT_SYMBOL(enable_irq); | 504 | EXPORT_SYMBOL(enable_irq); |
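Because disables nest, the line only comes back on at the matching final enable. Sketch:

	disable_irq(irq);	/* depth 0 -> 1, line masked */
	disable_irq(irq);	/* depth 1 -> 2 */
	enable_irq(irq);	/* depth 2 -> 1, still masked */
	enable_irq(irq);	/* depth 1 -> 0, line enabled again */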
510 | 505 | ||
511 | static int set_irq_wake_real(unsigned int irq, unsigned int on) | 506 | static int set_irq_wake_real(unsigned int irq, unsigned int on) |
512 | { | 507 | { |
513 | struct irq_desc *desc = irq_to_desc(irq); | 508 | struct irq_desc *desc = irq_to_desc(irq); |
514 | int ret = -ENXIO; | 509 | int ret = -ENXIO; |
515 | 510 | ||
516 | if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) | 511 | if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) |
517 | return 0; | 512 | return 0; |
518 | 513 | ||
519 | if (desc->irq_data.chip->irq_set_wake) | 514 | if (desc->irq_data.chip->irq_set_wake) |
520 | ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); | 515 | ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); |
521 | 516 | ||
522 | return ret; | 517 | return ret; |
523 | } | 518 | } |
524 | 519 | ||
525 | /** | 520 | /** |
526 | * irq_set_irq_wake - control irq power management wakeup | 521 | * irq_set_irq_wake - control irq power management wakeup |
527 | * @irq: interrupt to control | 522 | * @irq: interrupt to control |
528 | * @on: enable/disable power management wakeup | 523 | * @on: enable/disable power management wakeup |
529 | * | 524 | * |
530 | * Enable/disable power management wakeup mode, which is | 525 | * Enable/disable power management wakeup mode, which is |
531 | * disabled by default. Enables and disables must match, | 526 | * disabled by default. Enables and disables must match, |
532 | * just as they match for non-wakeup mode support. | 527 | * just as they match for non-wakeup mode support. |
533 | * | 528 | * |
534 | * Wakeup mode lets this IRQ wake the system from sleep | 529 | * Wakeup mode lets this IRQ wake the system from sleep |
535 | * states like "suspend to RAM". | 530 | * states like "suspend to RAM". |
536 | */ | 531 | */ |
537 | int irq_set_irq_wake(unsigned int irq, unsigned int on) | 532 | int irq_set_irq_wake(unsigned int irq, unsigned int on) |
538 | { | 533 | { |
539 | unsigned long flags; | 534 | unsigned long flags; |
540 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); | 535 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
541 | int ret = 0; | 536 | int ret = 0; |
542 | 537 | ||
543 | if (!desc) | 538 | if (!desc) |
544 | return -EINVAL; | 539 | return -EINVAL; |
545 | 540 | ||
546 | /* wakeup-capable irqs can be shared between drivers that | 541 | /* wakeup-capable irqs can be shared between drivers that |
547 | * don't need to have the same sleep mode behaviors. | 542 | * don't need to have the same sleep mode behaviors. |
548 | */ | 543 | */ |
549 | if (on) { | 544 | if (on) { |
550 | if (desc->wake_depth++ == 0) { | 545 | if (desc->wake_depth++ == 0) { |
551 | ret = set_irq_wake_real(irq, on); | 546 | ret = set_irq_wake_real(irq, on); |
552 | if (ret) | 547 | if (ret) |
553 | desc->wake_depth = 0; | 548 | desc->wake_depth = 0; |
554 | else | 549 | else |
555 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); | 550 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); |
556 | } | 551 | } |
557 | } else { | 552 | } else { |
558 | if (desc->wake_depth == 0) { | 553 | if (desc->wake_depth == 0) { |
559 | WARN(1, "Unbalanced IRQ %d wake disable\n", irq); | 554 | WARN(1, "Unbalanced IRQ %d wake disable\n", irq); |
560 | } else if (--desc->wake_depth == 0) { | 555 | } else if (--desc->wake_depth == 0) { |
561 | ret = set_irq_wake_real(irq, on); | 556 | ret = set_irq_wake_real(irq, on); |
562 | if (ret) | 557 | if (ret) |
563 | desc->wake_depth = 1; | 558 | desc->wake_depth = 1; |
564 | else | 559 | else |
565 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); | 560 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); |
566 | } | 561 | } |
567 | } | 562 | } |
568 | irq_put_desc_busunlock(desc, flags); | 563 | irq_put_desc_busunlock(desc, flags); |
569 | return ret; | 564 | return ret; |
570 | } | 565 | } |
571 | EXPORT_SYMBOL(irq_set_irq_wake); | 566 | EXPORT_SYMBOL(irq_set_irq_wake); |
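Sketch of the balanced suspend/resume pairing (hypothetical driver; enable_irq_wake()/disable_irq_wake() are the usual wrappers around this call):

static int mydev_suspend(struct device *dev)
{
	struct mydev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(md->irq, 1);
	return 0;
}

static int mydev_resume(struct device *dev)
{
	struct mydev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(md->irq, 0);
	return 0;
}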
572 | 567 | ||
573 | /* | 568 | /* |
574 | * Internal function that tells the architecture code whether a | 569 | * Internal function that tells the architecture code whether a |
575 | * particular irq has been exclusively allocated or is available | 570 | * particular irq has been exclusively allocated or is available |
576 | * for driver use. | 571 | * for driver use. |
577 | */ | 572 | */ |
578 | int can_request_irq(unsigned int irq, unsigned long irqflags) | 573 | int can_request_irq(unsigned int irq, unsigned long irqflags) |
579 | { | 574 | { |
580 | unsigned long flags; | 575 | unsigned long flags; |
581 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); | 576 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
582 | int canrequest = 0; | 577 | int canrequest = 0; |
583 | 578 | ||
584 | if (!desc) | 579 | if (!desc) |
585 | return 0; | 580 | return 0; |
586 | 581 | ||
587 | if (irq_settings_can_request(desc)) { | 582 | if (irq_settings_can_request(desc)) { |
588 | if (!desc->action || | 583 | if (!desc->action || |
589 | irqflags & desc->action->flags & IRQF_SHARED) | 584 | irqflags & desc->action->flags & IRQF_SHARED) |
590 | canrequest = 1; | 585 | canrequest = 1; |
591 | } | 586 | } |
592 | irq_put_desc_unlock(desc, flags); | 587 | irq_put_desc_unlock(desc, flags); |
593 | return canrequest; | 588 | return canrequest; |
594 | } | 589 | } |
595 | 590 | ||
596 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 591 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
597 | unsigned long flags) | 592 | unsigned long flags) |
598 | { | 593 | { |
599 | struct irq_chip *chip = desc->irq_data.chip; | 594 | struct irq_chip *chip = desc->irq_data.chip; |
600 | int ret, unmask = 0; | 595 | int ret, unmask = 0; |
601 | 596 | ||
602 | if (!chip || !chip->irq_set_type) { | 597 | if (!chip || !chip->irq_set_type) { |
603 | /* | 598 | /* |
604 | * IRQF_TRIGGER_* but the PIC does not support multiple | 599 | * IRQF_TRIGGER_* but the PIC does not support multiple |
605 | * flow-types? | 600 | * flow-types? |
606 | */ | 601 | */ |
607 | pr_debug("No set_type function for IRQ %d (%s)\n", irq, | 602 | pr_debug("No set_type function for IRQ %d (%s)\n", irq, |
608 | chip ? (chip->name ? : "unknown") : "unknown"); | 603 | chip ? (chip->name ? : "unknown") : "unknown"); |
609 | return 0; | 604 | return 0; |
610 | } | 605 | } |
611 | 606 | ||
612 | flags &= IRQ_TYPE_SENSE_MASK; | 607 | flags &= IRQ_TYPE_SENSE_MASK; |
613 | 608 | ||
614 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | 609 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { |
615 | if (!irqd_irq_masked(&desc->irq_data)) | 610 | if (!irqd_irq_masked(&desc->irq_data)) |
616 | mask_irq(desc); | 611 | mask_irq(desc); |
617 | if (!irqd_irq_disabled(&desc->irq_data)) | 612 | if (!irqd_irq_disabled(&desc->irq_data)) |
618 | unmask = 1; | 613 | unmask = 1; |
619 | } | 614 | } |
620 | 615 | ||
621 | /* caller masked out all except trigger mode flags */ | 616 | /* caller masked out all except trigger mode flags */ |
622 | ret = chip->irq_set_type(&desc->irq_data, flags); | 617 | ret = chip->irq_set_type(&desc->irq_data, flags); |
623 | 618 | ||
624 | switch (ret) { | 619 | switch (ret) { |
625 | case IRQ_SET_MASK_OK: | 620 | case IRQ_SET_MASK_OK: |
626 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); | 621 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); |
627 | irqd_set(&desc->irq_data, flags); | 622 | irqd_set(&desc->irq_data, flags); |
628 | 623 | ||
629 | case IRQ_SET_MASK_OK_NOCOPY: | 624 | case IRQ_SET_MASK_OK_NOCOPY: |
630 | flags = irqd_get_trigger_type(&desc->irq_data); | 625 | flags = irqd_get_trigger_type(&desc->irq_data); |
631 | irq_settings_set_trigger_mask(desc, flags); | 626 | irq_settings_set_trigger_mask(desc, flags); |
632 | irqd_clear(&desc->irq_data, IRQD_LEVEL); | 627 | irqd_clear(&desc->irq_data, IRQD_LEVEL); |
633 | irq_settings_clr_level(desc); | 628 | irq_settings_clr_level(desc); |
634 | if (flags & IRQ_TYPE_LEVEL_MASK) { | 629 | if (flags & IRQ_TYPE_LEVEL_MASK) { |
635 | irq_settings_set_level(desc); | 630 | irq_settings_set_level(desc); |
636 | irqd_set(&desc->irq_data, IRQD_LEVEL); | 631 | irqd_set(&desc->irq_data, IRQD_LEVEL); |
637 | } | 632 | } |
638 | 633 | ||
639 | ret = 0; | 634 | ret = 0; |
640 | break; | 635 | break; |
641 | default: | 636 | default: |
642 | pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", | 637 | pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", |
643 | flags, irq, chip->irq_set_type); | 638 | flags, irq, chip->irq_set_type); |
644 | } | 639 | } |
645 | if (unmask) | 640 | if (unmask) |
646 | unmask_irq(desc); | 641 | unmask_irq(desc); |
647 | return ret; | 642 | return ret; |
648 | } | 643 | } |
649 | 644 | ||
650 | #ifdef CONFIG_HARDIRQS_SW_RESEND | 645 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
651 | int irq_set_parent(int irq, int parent_irq) | 646 | int irq_set_parent(int irq, int parent_irq) |
652 | { | 647 | { |
653 | unsigned long flags; | 648 | unsigned long flags; |
654 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); | 649 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
655 | 650 | ||
656 | if (!desc) | 651 | if (!desc) |
657 | return -EINVAL; | 652 | return -EINVAL; |
658 | 653 | ||
659 | desc->parent_irq = parent_irq; | 654 | desc->parent_irq = parent_irq; |
660 | 655 | ||
661 | irq_put_desc_unlock(desc, flags); | 656 | irq_put_desc_unlock(desc, flags); |
662 | return 0; | 657 | return 0; |
663 | } | 658 | } |
664 | #endif | 659 | #endif |
665 | 660 | ||
666 | /* | 661 | /* |
667 | * Default primary interrupt handler for threaded interrupts. It is | 662 | * Default primary interrupt handler for threaded interrupts. It is |
668 | * assigned as the primary handler when request_threaded_irq is called | 663 | * assigned as the primary handler when request_threaded_irq is called |
669 | * with handler == NULL. Useful for oneshot interrupts. | 664 | * with handler == NULL. Useful for oneshot interrupts. |
670 | */ | 665 | */ |
671 | static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) | 666 | static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) |
672 | { | 667 | { |
673 | return IRQ_WAKE_THREAD; | 668 | return IRQ_WAKE_THREAD; |
674 | } | 669 | } |
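A usage sketch of the purely threaded request this enables (names are illustrative, not from this patch):

static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
{
	/* may sleep, talk to slow buses, etc. */
	return IRQ_HANDLED;
}

	ret = request_threaded_irq(md->irq, NULL, mydev_thread_fn,
				   IRQF_ONESHOT, "mydev", md);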
675 | 670 | ||
676 | /* | 671 | /* |
677 | * Primary handler for nested threaded interrupts. Should never be | 672 | * Primary handler for nested threaded interrupts. Should never be |
678 | * called. | 673 | * called. |
679 | */ | 674 | */ |
680 | static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) | 675 | static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) |
681 | { | 676 | { |
682 | WARN(1, "Primary handler called for nested irq %d\n", irq); | 677 | WARN(1, "Primary handler called for nested irq %d\n", irq); |
683 | return IRQ_NONE; | 678 | return IRQ_NONE; |
684 | } | 679 | } |
685 | 680 | ||
686 | static int irq_wait_for_interrupt(struct irqaction *action) | 681 | static int irq_wait_for_interrupt(struct irqaction *action) |
687 | { | 682 | { |
688 | set_current_state(TASK_INTERRUPTIBLE); | 683 | set_current_state(TASK_INTERRUPTIBLE); |
689 | 684 | ||
690 | while (!kthread_should_stop()) { | 685 | while (!kthread_should_stop()) { |
691 | 686 | ||
692 | if (test_and_clear_bit(IRQTF_RUNTHREAD, | 687 | if (test_and_clear_bit(IRQTF_RUNTHREAD, |
693 | &action->thread_flags)) { | 688 | &action->thread_flags)) { |
694 | __set_current_state(TASK_RUNNING); | 689 | __set_current_state(TASK_RUNNING); |
695 | return 0; | 690 | return 0; |
696 | } | 691 | } |
697 | schedule(); | 692 | schedule(); |
698 | set_current_state(TASK_INTERRUPTIBLE); | 693 | set_current_state(TASK_INTERRUPTIBLE); |
699 | } | 694 | } |
700 | __set_current_state(TASK_RUNNING); | 695 | __set_current_state(TASK_RUNNING); |
701 | return -1; | 696 | return -1; |
702 | } | 697 | } |
703 | 698 | ||
704 | /* | 699 | /* |
705 | * Oneshot interrupts keep the irq line masked until the threaded | 700 | * Oneshot interrupts keep the irq line masked until the threaded |
706 | * handler has finished. Unmask if the interrupt has not been disabled and | 701 | * handler has finished. Unmask if the interrupt has not been disabled and |
707 | * is marked MASKED. | 702 | * is marked MASKED. |
708 | */ | 703 | */ |
709 | static void irq_finalize_oneshot(struct irq_desc *desc, | 704 | static void irq_finalize_oneshot(struct irq_desc *desc, |
710 | struct irqaction *action) | 705 | struct irqaction *action) |
711 | { | 706 | { |
712 | if (!(desc->istate & IRQS_ONESHOT)) | 707 | if (!(desc->istate & IRQS_ONESHOT)) |
713 | return; | 708 | return; |
714 | again: | 709 | again: |
715 | chip_bus_lock(desc); | 710 | chip_bus_lock(desc); |
716 | raw_spin_lock_irq(&desc->lock); | 711 | raw_spin_lock_irq(&desc->lock); |
717 | 712 | ||
718 | /* | 713 | /* |
719 | * Implausible though it may be, we need to protect ourselves against | 714 | * Implausible though it may be, we need to protect ourselves against |
720 | * the following scenario: | 715 | * the following scenario: |
721 | * | 716 | * |
722 | * The thread can finish before the hard interrupt handler | 717 | * The thread can finish before the hard interrupt handler |
723 | * on the other CPU. If we unmask the irq line then the | 718 | * on the other CPU. If we unmask the irq line then the |
724 | * interrupt can come in again, mask the line and leave due | 719 | * interrupt can come in again, mask the line and leave due |
725 | * to IRQS_INPROGRESS, and the irq line stays masked forever. | 720 | * to IRQS_INPROGRESS, and the irq line stays masked forever. |
726 | * | 721 | * |
727 | * This also serializes the state of shared oneshot handlers | 722 | * This also serializes the state of shared oneshot handlers |
728 | * versus "desc->threads_oneshot |= action->thread_mask;" in | 723 | * versus "desc->threads_oneshot |= action->thread_mask;" in |
729 | * irq_wake_thread(). See the comment there which explains the | 724 | * irq_wake_thread(). See the comment there which explains the |
730 | * serialization. | 725 | * serialization. |
731 | */ | 726 | */ |
732 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { | 727 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { |
733 | raw_spin_unlock_irq(&desc->lock); | 728 | raw_spin_unlock_irq(&desc->lock); |
734 | chip_bus_sync_unlock(desc); | 729 | chip_bus_sync_unlock(desc); |
735 | cpu_relax(); | 730 | cpu_relax(); |
736 | goto again; | 731 | goto again; |
737 | } | 732 | } |
738 | 733 | ||
739 | /* | 734 | /* |
740 | * Now check again, whether the thread should run. Otherwise | 735 | * Now check again, whether the thread should run. Otherwise |
741 | * we would clear the threads_oneshot bit of this thread which | 736 | * we would clear the threads_oneshot bit of this thread which |
742 | * was just set. | 737 | * was just set. |
743 | */ | 738 | */ |
744 | if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | 739 | if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) |
745 | goto out_unlock; | 740 | goto out_unlock; |
746 | 741 | ||
747 | desc->threads_oneshot &= ~action->thread_mask; | 742 | desc->threads_oneshot &= ~action->thread_mask; |
748 | 743 | ||
749 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && | 744 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && |
750 | irqd_irq_masked(&desc->irq_data)) | 745 | irqd_irq_masked(&desc->irq_data)) |
751 | unmask_threaded_irq(desc); | 746 | unmask_threaded_irq(desc); |
752 | 747 | ||
753 | out_unlock: | 748 | out_unlock: |
754 | raw_spin_unlock_irq(&desc->lock); | 749 | raw_spin_unlock_irq(&desc->lock); |
755 | chip_bus_sync_unlock(desc); | 750 | chip_bus_sync_unlock(desc); |
756 | } | 751 | } |
757 | 752 | ||
758 | #ifdef CONFIG_SMP | 753 | #ifdef CONFIG_SMP |
759 | /* | 754 | /* |
760 | * Check whether we need to change the affinity of the interrupt thread. | 755 | * Check whether we need to change the affinity of the interrupt thread. |
761 | */ | 756 | */ |
762 | static void | 757 | static void |
763 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | 758 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) |
764 | { | 759 | { |
765 | cpumask_var_t mask; | 760 | cpumask_var_t mask; |
766 | bool valid = true; | 761 | bool valid = true; |
767 | 762 | ||
768 | if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) | 763 | if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) |
769 | return; | 764 | return; |
770 | 765 | ||
771 | /* | 766 | /* |
772 | * In case we are out of memory, we set IRQTF_AFFINITY again and | 767 | * In case we are out of memory, we set IRQTF_AFFINITY again and |
773 | * try again next time | 768 | * try again next time |
774 | */ | 769 | */ |
775 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | 770 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { |
776 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | 771 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
777 | return; | 772 | return; |
778 | } | 773 | } |
779 | 774 | ||
780 | raw_spin_lock_irq(&desc->lock); | 775 | raw_spin_lock_irq(&desc->lock); |
781 | /* | 776 | /* |
782 | * This code is triggered unconditionally. Check the affinity | 777 | * This code is triggered unconditionally. Check the affinity |
783 | * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. | 778 | * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. |
784 | */ | 779 | */ |
785 | if (desc->irq_data.affinity) | 780 | if (desc->irq_data.affinity) |
786 | cpumask_copy(mask, desc->irq_data.affinity); | 781 | cpumask_copy(mask, desc->irq_data.affinity); |
787 | else | 782 | else |
788 | valid = false; | 783 | valid = false; |
789 | raw_spin_unlock_irq(&desc->lock); | 784 | raw_spin_unlock_irq(&desc->lock); |
790 | 785 | ||
791 | if (valid) | 786 | if (valid) |
792 | set_cpus_allowed_ptr(current, mask); | 787 | set_cpus_allowed_ptr(current, mask); |
793 | free_cpumask_var(mask); | 788 | free_cpumask_var(mask); |
794 | } | 789 | } |
795 | #else | 790 | #else |
796 | static inline void | 791 | static inline void |
797 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } | 792 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } |
798 | #endif | 793 | #endif |
799 | 794 | ||
800 | /* | 795 | /* |
801 | * Interrupts which are not explicitly requested as threaded | 796 | * Interrupts which are not explicitly requested as threaded |
802 | * interrupts rely on the implicit bh/preempt disable of the hard irq | 797 | * interrupts rely on the implicit bh/preempt disable of the hard irq |
803 | * context. So we need to disable bh here to avoid deadlocks and other | 798 | * context. So we need to disable bh here to avoid deadlocks and other |
804 | * side effects. | 799 | * side effects. |
805 | */ | 800 | */ |
806 | static irqreturn_t | 801 | static irqreturn_t |
807 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) | 802 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) |
808 | { | 803 | { |
809 | irqreturn_t ret; | 804 | irqreturn_t ret; |
810 | 805 | ||
811 | local_bh_disable(); | 806 | local_bh_disable(); |
812 | ret = action->thread_fn(action->irq, action->dev_id); | 807 | ret = action->thread_fn(action->irq, action->dev_id); |
813 | irq_finalize_oneshot(desc, action); | 808 | irq_finalize_oneshot(desc, action); |
814 | local_bh_enable(); | 809 | local_bh_enable(); |
815 | return ret; | 810 | return ret; |
816 | } | 811 | } |
817 | 812 | ||
818 | /* | 813 | /* |
819 | * Interrupts explicitly requested as threaded interrupts want to be | 814 | * Interrupts explicitly requested as threaded interrupts want to be |
820 | * preemptible - many of them need to sleep and wait for slow busses to | 815 | * preemptible - many of them need to sleep and wait for slow busses to |
821 | * complete. | 816 | * complete. |
822 | */ | 817 | */ |
823 | static irqreturn_t irq_thread_fn(struct irq_desc *desc, | 818 | static irqreturn_t irq_thread_fn(struct irq_desc *desc, |
824 | struct irqaction *action) | 819 | struct irqaction *action) |
825 | { | 820 | { |
826 | irqreturn_t ret; | 821 | irqreturn_t ret; |
827 | 822 | ||
828 | ret = action->thread_fn(action->irq, action->dev_id); | 823 | ret = action->thread_fn(action->irq, action->dev_id); |
829 | irq_finalize_oneshot(desc, action); | 824 | irq_finalize_oneshot(desc, action); |
830 | return ret; | 825 | return ret; |
831 | } | 826 | } |
832 | 827 | ||
833 | static void wake_threads_waitq(struct irq_desc *desc) | 828 | static void wake_threads_waitq(struct irq_desc *desc) |
834 | { | 829 | { |
835 | if (atomic_dec_and_test(&desc->threads_active)) | 830 | if (atomic_dec_and_test(&desc->threads_active)) |
836 | wake_up(&desc->wait_for_threads); | 831 | wake_up(&desc->wait_for_threads); |
837 | } | 832 | } |
838 | 833 | ||
839 | static void irq_thread_dtor(struct callback_head *unused) | 834 | static void irq_thread_dtor(struct callback_head *unused) |
840 | { | 835 | { |
841 | struct task_struct *tsk = current; | 836 | struct task_struct *tsk = current; |
842 | struct irq_desc *desc; | 837 | struct irq_desc *desc; |
843 | struct irqaction *action; | 838 | struct irqaction *action; |
844 | 839 | ||
845 | if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) | 840 | if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) |
846 | return; | 841 | return; |
847 | 842 | ||
848 | action = kthread_data(tsk); | 843 | action = kthread_data(tsk); |
849 | 844 | ||
850 | pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", | 845 | pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", |
851 | tsk->comm, tsk->pid, action->irq); | 846 | tsk->comm, tsk->pid, action->irq); |
852 | 847 | ||
853 | 848 | ||
854 | desc = irq_to_desc(action->irq); | 849 | desc = irq_to_desc(action->irq); |
855 | /* | 850 | /* |
856 | * If IRQTF_RUNTHREAD is set, we need to decrement | 851 | * If IRQTF_RUNTHREAD is set, we need to decrement |
857 | * desc->threads_active and wake possible waiters. | 852 | * desc->threads_active and wake possible waiters. |
858 | */ | 853 | */ |
859 | if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | 854 | if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) |
860 | wake_threads_waitq(desc); | 855 | wake_threads_waitq(desc); |
861 | 856 | ||
862 | /* Prevent a stale desc->threads_oneshot */ | 857 | /* Prevent a stale desc->threads_oneshot */ |
863 | irq_finalize_oneshot(desc, action); | 858 | irq_finalize_oneshot(desc, action); |
864 | } | 859 | } |
865 | 860 | ||
866 | /* | 861 | /* |
867 | * Interrupt handler thread | 862 | * Interrupt handler thread |
868 | */ | 863 | */ |
869 | static int irq_thread(void *data) | 864 | static int irq_thread(void *data) |
870 | { | 865 | { |
871 | struct callback_head on_exit_work; | 866 | struct callback_head on_exit_work; |
872 | struct irqaction *action = data; | 867 | struct irqaction *action = data; |
873 | struct irq_desc *desc = irq_to_desc(action->irq); | 868 | struct irq_desc *desc = irq_to_desc(action->irq); |
874 | irqreturn_t (*handler_fn)(struct irq_desc *desc, | 869 | irqreturn_t (*handler_fn)(struct irq_desc *desc, |
875 | struct irqaction *action); | 870 | struct irqaction *action); |
876 | 871 | ||
877 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, | 872 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, |
878 | &action->thread_flags)) | 873 | &action->thread_flags)) |
879 | handler_fn = irq_forced_thread_fn; | 874 | handler_fn = irq_forced_thread_fn; |
880 | else | 875 | else |
881 | handler_fn = irq_thread_fn; | 876 | handler_fn = irq_thread_fn; |
882 | 877 | ||
883 | init_task_work(&on_exit_work, irq_thread_dtor); | 878 | init_task_work(&on_exit_work, irq_thread_dtor); |
884 | task_work_add(current, &on_exit_work, false); | 879 | task_work_add(current, &on_exit_work, false); |
885 | 880 | ||
886 | irq_thread_check_affinity(desc, action); | 881 | irq_thread_check_affinity(desc, action); |
887 | 882 | ||
888 | while (!irq_wait_for_interrupt(action)) { | 883 | while (!irq_wait_for_interrupt(action)) { |
889 | irqreturn_t action_ret; | 884 | irqreturn_t action_ret; |
890 | 885 | ||
891 | irq_thread_check_affinity(desc, action); | 886 | irq_thread_check_affinity(desc, action); |
892 | 887 | ||
893 | action_ret = handler_fn(desc, action); | 888 | action_ret = handler_fn(desc, action); |
894 | if (!noirqdebug) | 889 | if (!noirqdebug) |
895 | note_interrupt(action->irq, desc, action_ret); | 890 | note_interrupt(action->irq, desc, action_ret); |
896 | 891 | ||
897 | wake_threads_waitq(desc); | 892 | wake_threads_waitq(desc); |
898 | } | 893 | } |
899 | 894 | ||
900 | /* | 895 | /* |
901 | * This is the regular exit path. __free_irq() is stopping the | 896 | * This is the regular exit path. __free_irq() is stopping the |
902 | * thread via kthread_stop() after calling | 897 | * thread via kthread_stop() after calling |
903 | * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the | 898 | * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the |
904 | * oneshot mask bit can be set. We cannot verify that as we | 899 | * oneshot mask bit can be set. We cannot verify that as we |
905 | * cannot touch the oneshot mask at this point anymore as | 900 | * cannot touch the oneshot mask at this point anymore as |
906 | * __setup_irq() might have given out current's thread_mask | 901 | * __setup_irq() might have given out current's thread_mask |
907 | * again. | 902 | * again. |
908 | */ | 903 | */ |
909 | task_work_cancel(current, irq_thread_dtor); | 904 | task_work_cancel(current, irq_thread_dtor); |
910 | return 0; | 905 | return 0; |
911 | } | 906 | } |
912 | 907 | ||
913 | /** | 908 | /** |
914 | * irq_wake_thread - wake the irq thread for the action identified by dev_id | 909 | * irq_wake_thread - wake the irq thread for the action identified by dev_id |
915 | * @irq: Interrupt line | 910 | * @irq: Interrupt line |
916 | * @dev_id: Device identity for which the thread should be woken | 911 | * @dev_id: Device identity for which the thread should be woken |
917 | * | 912 | * |
918 | */ | 913 | */ |
919 | void irq_wake_thread(unsigned int irq, void *dev_id) | 914 | void irq_wake_thread(unsigned int irq, void *dev_id) |
920 | { | 915 | { |
921 | struct irq_desc *desc = irq_to_desc(irq); | 916 | struct irq_desc *desc = irq_to_desc(irq); |
922 | struct irqaction *action; | 917 | struct irqaction *action; |
923 | unsigned long flags; | 918 | unsigned long flags; |
924 | 919 | ||
925 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 920 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
926 | return; | 921 | return; |
927 | 922 | ||
928 | raw_spin_lock_irqsave(&desc->lock, flags); | 923 | raw_spin_lock_irqsave(&desc->lock, flags); |
929 | for (action = desc->action; action; action = action->next) { | 924 | for (action = desc->action; action; action = action->next) { |
930 | if (action->dev_id == dev_id) { | 925 | if (action->dev_id == dev_id) { |
931 | if (action->thread) | 926 | if (action->thread) |
932 | __irq_wake_thread(desc, action); | 927 | __irq_wake_thread(desc, action); |
933 | break; | 928 | break; |
934 | } | 929 | } |
935 | } | 930 | } |
936 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 931 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
937 | } | 932 | } |
938 | EXPORT_SYMBOL_GPL(irq_wake_thread); | 933 | EXPORT_SYMBOL_GPL(irq_wake_thread); |
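Usage sketch (hypothetical driver): kick the irq thread from a software path, e.g. a recovery timer that found work the hardware failed to signal; dev_id must match the one passed to request_threaded_irq():

	if (mydev_work_pending(md))
		irq_wake_thread(md->irq, md);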
939 | 934 | ||
940 | static void irq_setup_forced_threading(struct irqaction *new) | 935 | static void irq_setup_forced_threading(struct irqaction *new) |
941 | { | 936 | { |
942 | if (!force_irqthreads) | 937 | if (!force_irqthreads) |
943 | return; | 938 | return; |
944 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) | 939 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) |
945 | return; | 940 | return; |
946 | 941 | ||
947 | new->flags |= IRQF_ONESHOT; | 942 | new->flags |= IRQF_ONESHOT; |
948 | 943 | ||
949 | if (!new->thread_fn) { | 944 | if (!new->thread_fn) { |
950 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | 945 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); |
951 | new->thread_fn = new->handler; | 946 | new->thread_fn = new->handler; |
952 | new->handler = irq_default_primary_handler; | 947 | new->handler = irq_default_primary_handler; |
953 | } | 948 | } |
954 | } | 949 | } |
955 | 950 | ||
956 | static int irq_request_resources(struct irq_desc *desc) | 951 | static int irq_request_resources(struct irq_desc *desc) |
957 | { | 952 | { |
958 | struct irq_data *d = &desc->irq_data; | 953 | struct irq_data *d = &desc->irq_data; |
959 | struct irq_chip *c = d->chip; | 954 | struct irq_chip *c = d->chip; |
960 | 955 | ||
961 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; | 956 | return c->irq_request_resources ? c->irq_request_resources(d) : 0; |
962 | } | 957 | } |
963 | 958 | ||
964 | static void irq_release_resources(struct irq_desc *desc) | 959 | static void irq_release_resources(struct irq_desc *desc) |
965 | { | 960 | { |
966 | struct irq_data *d = &desc->irq_data; | 961 | struct irq_data *d = &desc->irq_data; |
967 | struct irq_chip *c = d->chip; | 962 | struct irq_chip *c = d->chip; |
968 | 963 | ||
969 | if (c->irq_release_resources) | 964 | if (c->irq_release_resources) |
970 | c->irq_release_resources(d); | 965 | c->irq_release_resources(d); |
971 | } | 966 | } |
972 | 967 | ||
973 | /* | 968 | /* |
974 | * Internal function to register an irqaction - typically used to | 969 | * Internal function to register an irqaction - typically used to |
975 | * allocate special interrupts that are part of the architecture. | 970 | * allocate special interrupts that are part of the architecture. |
976 | */ | 971 | */ |
977 | static int | 972 | static int |
978 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | 973 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) |
979 | { | 974 | { |
980 | struct irqaction *old, **old_ptr; | 975 | struct irqaction *old, **old_ptr; |
981 | unsigned long flags, thread_mask = 0; | 976 | unsigned long flags, thread_mask = 0; |
982 | int ret, nested, shared = 0; | 977 | int ret, nested, shared = 0; |
983 | cpumask_var_t mask; | 978 | cpumask_var_t mask; |
984 | 979 | ||
985 | if (!desc) | 980 | if (!desc) |
986 | return -EINVAL; | 981 | return -EINVAL; |
987 | 982 | ||
988 | if (desc->irq_data.chip == &no_irq_chip) | 983 | if (desc->irq_data.chip == &no_irq_chip) |
989 | return -ENOSYS; | 984 | return -ENOSYS; |
990 | if (!try_module_get(desc->owner)) | 985 | if (!try_module_get(desc->owner)) |
991 | return -ENODEV; | 986 | return -ENODEV; |
992 | 987 | ||
993 | /* | 988 | /* |
994 | * Check whether the interrupt nests into another interrupt | 989 | * Check whether the interrupt nests into another interrupt |
995 | * thread. | 990 | * thread. |
996 | */ | 991 | */ |
997 | nested = irq_settings_is_nested_thread(desc); | 992 | nested = irq_settings_is_nested_thread(desc); |
998 | if (nested) { | 993 | if (nested) { |
999 | if (!new->thread_fn) { | 994 | if (!new->thread_fn) { |
1000 | ret = -EINVAL; | 995 | ret = -EINVAL; |
1001 | goto out_mput; | 996 | goto out_mput; |
1002 | } | 997 | } |
1003 | /* | 998 | /* |
1004 | * Replace the primary handler which was provided from | 999 | * Replace the primary handler which was provided from |
1005 | * the driver for non nested interrupt handling by the | 1000 | * the driver for non nested interrupt handling by the |
1006 | * dummy function which warns when called. | 1001 | * dummy function which warns when called. |
1007 | */ | 1002 | */ |
1008 | new->handler = irq_nested_primary_handler; | 1003 | new->handler = irq_nested_primary_handler; |
1009 | } else { | 1004 | } else { |
1010 | if (irq_settings_can_thread(desc)) | 1005 | if (irq_settings_can_thread(desc)) |
1011 | irq_setup_forced_threading(new); | 1006 | irq_setup_forced_threading(new); |
1012 | } | 1007 | } |
1013 | 1008 | ||
1014 | /* | 1009 | /* |
1015 | * Create a handler thread when a thread function is supplied | 1010 | * Create a handler thread when a thread function is supplied |
1016 | * and the interrupt does not nest into another interrupt | 1011 | * and the interrupt does not nest into another interrupt |
1017 | * thread. | 1012 | * thread. |
1018 | */ | 1013 | */ |
1019 | if (new->thread_fn && !nested) { | 1014 | if (new->thread_fn && !nested) { |
1020 | struct task_struct *t; | 1015 | struct task_struct *t; |
1021 | static const struct sched_param param = { | 1016 | static const struct sched_param param = { |
1022 | .sched_priority = MAX_USER_RT_PRIO/2, | 1017 | .sched_priority = MAX_USER_RT_PRIO/2, |
1023 | }; | 1018 | }; |
1024 | 1019 | ||
1025 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | 1020 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, |
1026 | new->name); | 1021 | new->name); |
1027 | if (IS_ERR(t)) { | 1022 | if (IS_ERR(t)) { |
1028 | ret = PTR_ERR(t); | 1023 | ret = PTR_ERR(t); |
1029 | goto out_mput; | 1024 | goto out_mput; |
1030 | } | 1025 | } |
1031 | 1026 | ||
1032 | sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m); | 1027 | sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m); |
1033 | 1028 | ||
1034 | /* | 1029 | /* |
1035 | * We keep the reference to the task struct even if | 1030 | * We keep the reference to the task struct even if |
1036 | * the thread dies to avoid that the interrupt code | 1031 | * the thread dies to avoid that the interrupt code |
1037 | * references an already freed task_struct. | 1032 | * references an already freed task_struct. |
1038 | */ | 1033 | */ |
1039 | get_task_struct(t); | 1034 | get_task_struct(t); |
1040 | new->thread = t; | 1035 | new->thread = t; |
1041 | /* | 1036 | /* |
1042 | * Tell the thread to set its affinity. This is | 1037 | * Tell the thread to set its affinity. This is |
1043 | * important for shared interrupt handlers as we do | 1038 | * important for shared interrupt handlers as we do |
1044 | * not invoke setup_affinity() for the secondary | 1039 | * not invoke setup_affinity() for the secondary |
1045 | * handlers as everything is already set up. Even for | 1040 | * handlers as everything is already set up. Even for |
1046 | * interrupts marked with IRQF_NOBALANCING this is | 1041 | * interrupts marked with IRQF_NOBALANCING this is |
1047 | * correct as we want the thread to move to the cpu(s) | 1042 | * correct as we want the thread to move to the cpu(s) |
1048 | * on which the requesting code placed the interrupt. | 1043 | * on which the requesting code placed the interrupt. |
1049 | */ | 1044 | */ |
1050 | set_bit(IRQTF_AFFINITY, &new->thread_flags); | 1045 | set_bit(IRQTF_AFFINITY, &new->thread_flags); |
1051 | } | 1046 | } |
1052 | 1047 | ||
1053 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | 1048 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { |
1054 | ret = -ENOMEM; | 1049 | ret = -ENOMEM; |
1055 | goto out_thread; | 1050 | goto out_thread; |
1056 | } | 1051 | } |
1057 | 1052 | ||
1058 | /* | 1053 | /* |
1059 | * Drivers are often written to work w/o knowledge about the | 1054 | * Drivers are often written to work w/o knowledge about the |
1060 | * underlying irq chip implementation, so a request for a | 1055 | * underlying irq chip implementation, so a request for a |
1061 | * threaded irq without a primary hard irq context handler | 1056 | * threaded irq without a primary hard irq context handler |
1062 | * requires the ONESHOT flag to be set. Some irq chips like | 1057 | * requires the ONESHOT flag to be set. Some irq chips like |
1063 | * MSI based interrupts are per se one shot safe. Check the | 1058 | * MSI based interrupts are per se one shot safe. Check the |
1064 | * chip flags, so we can avoid the unmask dance at the end of | 1059 | * chip flags, so we can avoid the unmask dance at the end of |
1065 | * the threaded handler for those. | 1060 | * the threaded handler for those. |
1066 | */ | 1061 | */ |
1067 | if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) | 1062 | if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) |
1068 | new->flags &= ~IRQF_ONESHOT; | 1063 | new->flags &= ~IRQF_ONESHOT; |
1069 | 1064 | ||
1070 | /* | 1065 | /* |
1071 | * The following block of code has to be executed atomically | 1066 | * The following block of code has to be executed atomically |
1072 | */ | 1067 | */ |
1073 | raw_spin_lock_irqsave(&desc->lock, flags); | 1068 | raw_spin_lock_irqsave(&desc->lock, flags); |
1074 | old_ptr = &desc->action; | 1069 | old_ptr = &desc->action; |
1075 | old = *old_ptr; | 1070 | old = *old_ptr; |
1076 | if (old) { | 1071 | if (old) { |
1077 | /* | 1072 | /* |
1078 | * Can't share interrupts unless both agree to and are | 1073 | * Can't share interrupts unless both agree to and are |
1079 | * the same type (level, edge, polarity). So both flag | 1074 | * the same type (level, edge, polarity). So both flag |
1080 | * fields must have IRQF_SHARED set and the bits which | 1075 | * fields must have IRQF_SHARED set and the bits which |
1081 | * set the trigger type must match. Also all must | 1076 | * set the trigger type must match. Also all must |
1082 | * agree on ONESHOT. | 1077 | * agree on ONESHOT. |
1083 | */ | 1078 | */ |
1084 | if (!((old->flags & new->flags) & IRQF_SHARED) || | 1079 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
1085 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || | 1080 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || |
1086 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) | 1081 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) |
1087 | goto mismatch; | 1082 | goto mismatch; |
1088 | 1083 | ||
1089 | /* All handlers must agree on per-cpuness */ | 1084 | /* All handlers must agree on per-cpuness */ |
1090 | if ((old->flags & IRQF_PERCPU) != | 1085 | if ((old->flags & IRQF_PERCPU) != |
1091 | (new->flags & IRQF_PERCPU)) | 1086 | (new->flags & IRQF_PERCPU)) |
1092 | goto mismatch; | 1087 | goto mismatch; |
1093 | 1088 | ||
1094 | /* add new interrupt at end of irq queue */ | 1089 | /* add new interrupt at end of irq queue */ |
1095 | do { | 1090 | do { |
1096 | /* | 1091 | /* |
1097 | * Or all existing action->thread_mask bits, | 1092 | * Or all existing action->thread_mask bits, |
1098 | * so we can find the next zero bit for this | 1093 | * so we can find the next zero bit for this |
1099 | * new action. | 1094 | * new action. |
1100 | */ | 1095 | */ |
1101 | thread_mask |= old->thread_mask; | 1096 | thread_mask |= old->thread_mask; |
1102 | old_ptr = &old->next; | 1097 | old_ptr = &old->next; |
1103 | old = *old_ptr; | 1098 | old = *old_ptr; |
1104 | } while (old); | 1099 | } while (old); |
1105 | shared = 1; | 1100 | shared = 1; |
1106 | } | 1101 | } |
1107 | 1102 | ||
1108 | /* | 1103 | /* |
1109 | * Setup the thread mask for this irqaction for ONESHOT. For | 1104 | * Setup the thread mask for this irqaction for ONESHOT. For |
1110 | * !ONESHOT irqs the thread mask is 0 so we can avoid a | 1105 | * !ONESHOT irqs the thread mask is 0 so we can avoid a |
1111 | * conditional in irq_wake_thread(). | 1106 | * conditional in irq_wake_thread(). |
1112 | */ | 1107 | */ |
1113 | if (new->flags & IRQF_ONESHOT) { | 1108 | if (new->flags & IRQF_ONESHOT) { |
1114 | /* | 1109 | /* |
1115 | * Unlikely to have 32 (resp. 64 on 64-bit) irqs sharing one line, | 1110 | * Unlikely to have 32 (resp. 64 on 64-bit) irqs sharing one line, |
1116 | * but who knows. | 1111 | * but who knows. |
1117 | */ | 1112 | */ |
1118 | if (thread_mask == ~0UL) { | 1113 | if (thread_mask == ~0UL) { |
1119 | ret = -EBUSY; | 1114 | ret = -EBUSY; |
1120 | goto out_mask; | 1115 | goto out_mask; |
1121 | } | 1116 | } |
1122 | /* | 1117 | /* |
1123 | * The thread_mask for the action is or'ed to | 1118 | * The thread_mask for the action is or'ed to |
1124 | * desc->thread_active to indicate that the | 1119 | * desc->thread_active to indicate that the |
1125 | * IRQF_ONESHOT thread handler has been woken, but not | 1120 | * IRQF_ONESHOT thread handler has been woken, but not |
1126 | * yet finished. The bit is cleared when a thread | 1121 | * yet finished. The bit is cleared when a thread |
1127 | * completes. When all threads of a shared interrupt | 1122 | * completes. When all threads of a shared interrupt |
1128 | * line have completed desc->threads_active becomes | 1123 | * line have completed desc->threads_active becomes |
1129 | * zero and the interrupt line is unmasked. See | 1124 | * zero and the interrupt line is unmasked. See |
1130 | * handle.c:irq_wake_thread() for further information. | 1125 | * handle.c:irq_wake_thread() for further information. |
1131 | * | 1126 | * |
1132 | * If no thread is woken by primary (hard irq context) | 1127 | * If no thread is woken by primary (hard irq context) |
1133 | * interrupt handlers, then desc->threads_active is | 1128 | * interrupt handlers, then desc->threads_active is |
1134 | * also checked for zero to unmask the irq line in the | 1129 | * also checked for zero to unmask the irq line in the |
1135 | * affected hard irq flow handlers | 1130 | * affected hard irq flow handlers |
1136 | * (handle_[fasteoi|level]_irq). | 1131 | * (handle_[fasteoi|level]_irq). |
1137 | * | 1132 | * |
1138 | * The new action gets the first zero bit of | 1133 | * The new action gets the first zero bit of |
1139 | * thread_mask assigned. See the loop above which or's | 1134 | * thread_mask assigned. See the loop above which or's |
1140 | * all existing action->thread_mask bits. | 1135 | * all existing action->thread_mask bits. |
1141 | */ | 1136 | */ |
1142 | new->thread_mask = 1UL << ffz(thread_mask); | 1137 | new->thread_mask = 1UL << ffz(thread_mask); |
1143 | 1138 | ||
1144 | } else if (new->handler == irq_default_primary_handler && | 1139 | } else if (new->handler == irq_default_primary_handler && |
1145 | !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { | 1140 | !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { |
1146 | /* | 1141 | /* |
1147 | * The interrupt was requested with handler = NULL, so | 1142 | * The interrupt was requested with handler = NULL, so |
1148 | * we use the default primary handler for it. But it | 1143 | * we use the default primary handler for it. But it |
1149 | * does not have the oneshot flag set. In combination | 1144 | * does not have the oneshot flag set. In combination |
1150 | * with level interrupts this is deadly, because the | 1145 | * with level interrupts this is deadly, because the |
1151 | * default primary handler just wakes the thread, then | 1146 | * default primary handler just wakes the thread, then |
1152 | * the irq line is reenabled, but the device still | 1147 | * the irq line is reenabled, but the device still |
1153 | * has the level irq asserted. Rinse and repeat.... | 1148 | * has the level irq asserted. Rinse and repeat.... |
1154 | * | 1149 | * |
1155 | * While this works for edge type interrupts, we play | 1150 | * While this works for edge type interrupts, we play |
1156 | * it safe and reject unconditionally because we can't | 1151 | * it safe and reject unconditionally because we can't |
1157 | * say for sure which type this interrupt really | 1152 | * say for sure which type this interrupt really |
1158 | * has. The type flags are unreliable as the | 1153 | * has. The type flags are unreliable as the |
1159 | * underlying chip implementation can override them. | 1154 | * underlying chip implementation can override them. |
1160 | */ | 1155 | */ |
1161 | pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", | 1156 | pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", |
1162 | irq); | 1157 | irq); |
1163 | ret = -EINVAL; | 1158 | ret = -EINVAL; |
1164 | goto out_mask; | 1159 | goto out_mask; |
1165 | } | 1160 | } |
1166 | 1161 | ||
1167 | if (!shared) { | 1162 | if (!shared) { |
1168 | ret = irq_request_resources(desc); | 1163 | ret = irq_request_resources(desc); |
1169 | if (ret) { | 1164 | if (ret) { |
1170 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", | 1165 | pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", |
1171 | new->name, irq, desc->irq_data.chip->name); | 1166 | new->name, irq, desc->irq_data.chip->name); |
1172 | goto out_mask; | 1167 | goto out_mask; |
1173 | } | 1168 | } |
1174 | 1169 | ||
1175 | init_waitqueue_head(&desc->wait_for_threads); | 1170 | init_waitqueue_head(&desc->wait_for_threads); |
1176 | 1171 | ||
1177 | /* Setup the type (level, edge, polarity) if configured: */ | 1172 | /* Setup the type (level, edge, polarity) if configured: */ |
1178 | if (new->flags & IRQF_TRIGGER_MASK) { | 1173 | if (new->flags & IRQF_TRIGGER_MASK) { |
1179 | ret = __irq_set_trigger(desc, irq, | 1174 | ret = __irq_set_trigger(desc, irq, |
1180 | new->flags & IRQF_TRIGGER_MASK); | 1175 | new->flags & IRQF_TRIGGER_MASK); |
1181 | 1176 | ||
1182 | if (ret) | 1177 | if (ret) |
1183 | goto out_mask; | 1178 | goto out_mask; |
1184 | } | 1179 | } |
1185 | 1180 | ||
1186 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ | 1181 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
1187 | IRQS_ONESHOT | IRQS_WAITING); | 1182 | IRQS_ONESHOT | IRQS_WAITING); |
1188 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); | 1183 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
1189 | 1184 | ||
1190 | if (new->flags & IRQF_PERCPU) { | 1185 | if (new->flags & IRQF_PERCPU) { |
1191 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | 1186 | irqd_set(&desc->irq_data, IRQD_PER_CPU); |
1192 | irq_settings_set_per_cpu(desc); | 1187 | irq_settings_set_per_cpu(desc); |
1193 | } | 1188 | } |
1194 | 1189 | ||
1195 | if (new->flags & IRQF_ONESHOT) | 1190 | if (new->flags & IRQF_ONESHOT) |
1196 | desc->istate |= IRQS_ONESHOT; | 1191 | desc->istate |= IRQS_ONESHOT; |
1197 | 1192 | ||
1198 | if (irq_settings_can_autoenable(desc)) | 1193 | if (irq_settings_can_autoenable(desc)) |
1199 | irq_startup(desc, true); | 1194 | irq_startup(desc, true); |
1200 | else | 1195 | else |
1201 | /* Undo nested disables: */ | 1196 | /* Undo nested disables: */ |
1202 | desc->depth = 1; | 1197 | desc->depth = 1; |
1203 | 1198 | ||
1204 | /* Exclude IRQ from balancing if requested */ | 1199 | /* Exclude IRQ from balancing if requested */ |
1205 | if (new->flags & IRQF_NOBALANCING) { | 1200 | if (new->flags & IRQF_NOBALANCING) { |
1206 | irq_settings_set_no_balancing(desc); | 1201 | irq_settings_set_no_balancing(desc); |
1207 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | 1202 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); |
1208 | } | 1203 | } |
1209 | 1204 | ||
1210 | /* Set default affinity mask once everything is setup */ | 1205 | /* Set default affinity mask once everything is setup */ |
1211 | setup_affinity(irq, desc, mask); | 1206 | setup_affinity(irq, desc, mask); |
1212 | 1207 | ||
1213 | } else if (new->flags & IRQF_TRIGGER_MASK) { | 1208 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
1214 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; | 1209 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; |
1215 | unsigned int omsk = irq_settings_get_trigger_mask(desc); | 1210 | unsigned int omsk = irq_settings_get_trigger_mask(desc); |
1216 | 1211 | ||
1217 | if (nmsk != omsk) | 1212 | if (nmsk != omsk) |
1218 | /* hope the handler works with current trigger mode */ | 1213 | /* hope the handler works with current trigger mode */ |
1219 | pr_warning("irq %d uses trigger mode %u; requested %u\n", | 1214 | pr_warning("irq %d uses trigger mode %u; requested %u\n", |
1220 | irq, omsk, nmsk); | 1215 | irq, omsk, nmsk); |
1221 | } | 1216 | } |
1222 | 1217 | ||
1223 | new->irq = irq; | 1218 | new->irq = irq; |
1224 | *old_ptr = new; | 1219 | *old_ptr = new; |
1225 | 1220 | ||
1226 | /* Reset broken irq detection when installing new handler */ | 1221 | /* Reset broken irq detection when installing new handler */ |
1227 | desc->irq_count = 0; | 1222 | desc->irq_count = 0; |
1228 | desc->irqs_unhandled = 0; | 1223 | desc->irqs_unhandled = 0; |
1229 | 1224 | ||
1230 | /* | 1225 | /* |
1231 | * Check whether we disabled the irq via the spurious handler | 1226 | * Check whether we disabled the irq via the spurious handler |
1232 | * before. Reenable it and give it another chance. | 1227 | * before. Reenable it and give it another chance. |
1233 | */ | 1228 | */ |
1234 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { | 1229 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
1235 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; | 1230 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; |
1236 | __enable_irq(desc, irq, false); | 1231 | __enable_irq(desc, irq, false); |
1237 | } | 1232 | } |
1238 | 1233 | ||
1239 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1234 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1240 | 1235 | ||
1241 | /* | 1236 | /* |
1242 | * Strictly no need to wake it up, but hung_task complains | 1237 | * Strictly no need to wake it up, but hung_task complains |
1243 | * when no hard interrupt wakes the thread up. | 1238 | * when no hard interrupt wakes the thread up. |
1244 | */ | 1239 | */ |
1245 | if (new->thread) | 1240 | if (new->thread) |
1246 | wake_up_process(new->thread); | 1241 | wake_up_process(new->thread); |
1247 | 1242 | ||
1248 | register_irq_proc(irq, desc); | 1243 | register_irq_proc(irq, desc); |
1249 | new->dir = NULL; | 1244 | new->dir = NULL; |
1250 | register_handler_proc(irq, new); | 1245 | register_handler_proc(irq, new); |
1251 | free_cpumask_var(mask); | 1246 | free_cpumask_var(mask); |
1252 | 1247 | ||
1253 | return 0; | 1248 | return 0; |
1254 | 1249 | ||
1255 | mismatch: | 1250 | mismatch: |
1256 | if (!(new->flags & IRQF_PROBE_SHARED)) { | 1251 | if (!(new->flags & IRQF_PROBE_SHARED)) { |
1257 | pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", | 1252 | pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", |
1258 | irq, new->flags, new->name, old->flags, old->name); | 1253 | irq, new->flags, new->name, old->flags, old->name); |
1259 | #ifdef CONFIG_DEBUG_SHIRQ | 1254 | #ifdef CONFIG_DEBUG_SHIRQ |
1260 | dump_stack(); | 1255 | dump_stack(); |
1261 | #endif | 1256 | #endif |
1262 | } | 1257 | } |
1263 | ret = -EBUSY; | 1258 | ret = -EBUSY; |
1264 | 1259 | ||
1265 | out_mask: | 1260 | out_mask: |
1266 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1261 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1267 | free_cpumask_var(mask); | 1262 | free_cpumask_var(mask); |
1268 | 1263 | ||
1269 | out_thread: | 1264 | out_thread: |
1270 | if (new->thread) { | 1265 | if (new->thread) { |
1271 | struct task_struct *t = new->thread; | 1266 | struct task_struct *t = new->thread; |
1272 | 1267 | ||
1273 | new->thread = NULL; | 1268 | new->thread = NULL; |
1274 | kthread_stop(t); | 1269 | kthread_stop(t); |
1275 | put_task_struct(t); | 1270 | put_task_struct(t); |
1276 | } | 1271 | } |
1277 | out_mput: | 1272 | out_mput: |
1278 | module_put(desc->owner); | 1273 | module_put(desc->owner); |
1279 | return ret; | 1274 | return ret; |
1280 | } | 1275 | } |
1281 | 1276 | ||
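To make the ONESHOT bookkeeping in __setup_irq() concrete: each action on a shared ONESHOT line is assigned the first zero bit of the OR of all existing thread_masks, and the setup bails out with -EBUSY once the mask is exhausted. A minimal user-space sketch of the same allocation scheme (the ffz() below is a stand-in for the kernel helper of the same name, not the kernel implementation):

#include <stdio.h>

/* stand-in for the kernel's ffz(): index of the first zero bit */
static unsigned long ffz(unsigned long word)
{
	unsigned long bit = 0;

	while (word & 1UL) {
		word >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned long thread_mask = 0;	/* OR of all existing action masks */
	int i;

	/* three actions sharing one ONESHOT line get bits 0, 1 and 2 */
	for (i = 0; i < 3; i++) {
		unsigned long mask = 1UL << ffz(thread_mask);

		thread_mask |= mask;
		printf("action %d: thread_mask=%#lx\n", i, mask);
	}
	return 0;
}

Each bit set in desc->threads_active then means "this action's thread was woken but has not completed yet"; the line is unmasked again only when that field drops back to zero.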
1282 | /** | 1277 | /** |
1283 | * setup_irq - setup an interrupt | 1278 | * setup_irq - setup an interrupt |
1284 | * @irq: Interrupt line to setup | 1279 | * @irq: Interrupt line to setup |
1285 | * @act: irqaction for the interrupt | 1280 | * @act: irqaction for the interrupt |
1286 | * | 1281 | * |
1287 | * Used to statically setup interrupts in the early boot process. | 1282 | * Used to statically setup interrupts in the early boot process. |
1288 | */ | 1283 | */ |
1289 | int setup_irq(unsigned int irq, struct irqaction *act) | 1284 | int setup_irq(unsigned int irq, struct irqaction *act) |
1290 | { | 1285 | { |
1291 | int retval; | 1286 | int retval; |
1292 | struct irq_desc *desc = irq_to_desc(irq); | 1287 | struct irq_desc *desc = irq_to_desc(irq); |
1293 | 1288 | ||
1294 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 1289 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |

1295 | return -EINVAL; | 1290 | return -EINVAL; |
1296 | chip_bus_lock(desc); | 1291 | chip_bus_lock(desc); |
1297 | retval = __setup_irq(irq, desc, act); | 1292 | retval = __setup_irq(irq, desc, act); |
1298 | chip_bus_sync_unlock(desc); | 1293 | chip_bus_sync_unlock(desc); |
1299 | 1294 | ||
1300 | return retval; | 1295 | return retval; |
1301 | } | 1296 | } |
1302 | EXPORT_SYMBOL_GPL(setup_irq); | 1297 | EXPORT_SYMBOL_GPL(setup_irq); |
1303 | 1298 | ||
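As a usage sketch for setup_irq(): early platform code passes a statically allocated irqaction, since the kzalloc()-based request_irq() path is not usable before the allocators are up. TIMER_IRQ, the handler and the init function below are hypothetical names, not taken from this commit:

static irqreturn_t board_timer_tick(int irq, void *dev_id)
{
	/* acknowledge the timer hardware and run the periodic tick */
	return IRQ_HANDLED;
}

static struct irqaction board_timer_irqaction = {
	.handler = board_timer_tick,
	.flags   = IRQF_TIMER,
	.name    = "board-timer",
};

void __init board_time_init(void)
{
	setup_irq(TIMER_IRQ, &board_timer_irqaction);
}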
1304 | /* | 1299 | /* |
1305 | * Internal function to unregister an irqaction - used to free | 1300 | * Internal function to unregister an irqaction - used to free |
1306 | * regular and special interrupts that are part of the architecture. | 1301 | * regular and special interrupts that are part of the architecture. |
1307 | */ | 1302 | */ |
1308 | static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | 1303 | static struct irqaction *__free_irq(unsigned int irq, void *dev_id) |
1309 | { | 1304 | { |
1310 | struct irq_desc *desc = irq_to_desc(irq); | 1305 | struct irq_desc *desc = irq_to_desc(irq); |
1311 | struct irqaction *action, **action_ptr; | 1306 | struct irqaction *action, **action_ptr; |
1312 | unsigned long flags; | 1307 | unsigned long flags; |
1313 | 1308 | ||
1314 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | 1309 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
1315 | 1310 | ||
1316 | if (!desc) | 1311 | if (!desc) |
1317 | return NULL; | 1312 | return NULL; |
1318 | 1313 | ||
1319 | raw_spin_lock_irqsave(&desc->lock, flags); | 1314 | raw_spin_lock_irqsave(&desc->lock, flags); |
1320 | 1315 | ||
1321 | /* | 1316 | /* |
1322 | * There can be multiple actions per IRQ descriptor, find the right | 1317 | * There can be multiple actions per IRQ descriptor, find the right |
1323 | * one based on the dev_id: | 1318 | * one based on the dev_id: |
1324 | */ | 1319 | */ |
1325 | action_ptr = &desc->action; | 1320 | action_ptr = &desc->action; |
1326 | for (;;) { | 1321 | for (;;) { |
1327 | action = *action_ptr; | 1322 | action = *action_ptr; |
1328 | 1323 | ||
1329 | if (!action) { | 1324 | if (!action) { |
1330 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | 1325 | WARN(1, "Trying to free already-free IRQ %d\n", irq); |
1331 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1326 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1332 | 1327 | ||
1333 | return NULL; | 1328 | return NULL; |
1334 | } | 1329 | } |
1335 | 1330 | ||
1336 | if (action->dev_id == dev_id) | 1331 | if (action->dev_id == dev_id) |
1337 | break; | 1332 | break; |
1338 | action_ptr = &action->next; | 1333 | action_ptr = &action->next; |
1339 | } | 1334 | } |
1340 | 1335 | ||
1341 | /* Found it - now remove it from the list of entries: */ | 1336 | /* Found it - now remove it from the list of entries: */ |
1342 | *action_ptr = action->next; | 1337 | *action_ptr = action->next; |
1343 | 1338 | ||
1344 | /* If this was the last handler, shut down the IRQ line: */ | 1339 | /* If this was the last handler, shut down the IRQ line: */ |
1345 | if (!desc->action) { | 1340 | if (!desc->action) { |
1346 | irq_shutdown(desc); | 1341 | irq_shutdown(desc); |
1347 | irq_release_resources(desc); | 1342 | irq_release_resources(desc); |
1348 | } | 1343 | } |
1349 | 1344 | ||
1350 | #ifdef CONFIG_SMP | 1345 | #ifdef CONFIG_SMP |
1351 | /* make sure affinity_hint is cleaned up */ | 1346 | /* make sure affinity_hint is cleaned up */ |
1352 | if (WARN_ON_ONCE(desc->affinity_hint)) | 1347 | if (WARN_ON_ONCE(desc->affinity_hint)) |
1353 | desc->affinity_hint = NULL; | 1348 | desc->affinity_hint = NULL; |
1354 | #endif | 1349 | #endif |
1355 | 1350 | ||
1356 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1351 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1357 | 1352 | ||
1358 | unregister_handler_proc(irq, action); | 1353 | unregister_handler_proc(irq, action); |
1359 | 1354 | ||
1360 | /* Make sure it's not being used on another CPU: */ | 1355 | /* Make sure it's not being used on another CPU: */ |
1361 | synchronize_irq(irq); | 1356 | synchronize_irq(irq); |
1362 | 1357 | ||
1363 | #ifdef CONFIG_DEBUG_SHIRQ | 1358 | #ifdef CONFIG_DEBUG_SHIRQ |
1364 | /* | 1359 | /* |
1365 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ | 1360 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ |
1366 | * event to happen even now it's being freed, so let's make sure that | 1361 | * event to happen even now it's being freed, so let's make sure that |
1367 | * is so by doing an extra call to the handler .... | 1362 | * is so by doing an extra call to the handler .... |
1368 | * | 1363 | * |
1369 | * ( We do this after actually deregistering it, to make sure that a | 1364 | * ( We do this after actually deregistering it, to make sure that a |
1370 | * 'real' IRQ doesn't run in parallel with our fake. ) | 1365 | * 'real' IRQ doesn't run in parallel with our fake. ) |
1371 | */ | 1366 | */ |
1372 | if (action->flags & IRQF_SHARED) { | 1367 | if (action->flags & IRQF_SHARED) { |
1373 | local_irq_save(flags); | 1368 | local_irq_save(flags); |
1374 | action->handler(irq, dev_id); | 1369 | action->handler(irq, dev_id); |
1375 | local_irq_restore(flags); | 1370 | local_irq_restore(flags); |
1376 | } | 1371 | } |
1377 | #endif | 1372 | #endif |
1378 | 1373 | ||
1379 | if (action->thread) { | 1374 | if (action->thread) { |
1380 | kthread_stop(action->thread); | 1375 | kthread_stop(action->thread); |
1381 | put_task_struct(action->thread); | 1376 | put_task_struct(action->thread); |
1382 | } | 1377 | } |
1383 | 1378 | ||
1384 | module_put(desc->owner); | 1379 | module_put(desc->owner); |
1385 | return action; | 1380 | return action; |
1386 | } | 1381 | } |
1387 | 1382 | ||
1388 | /** | 1383 | /** |
1389 | * remove_irq - free an interrupt | 1384 | * remove_irq - free an interrupt |
1390 | * @irq: Interrupt line to free | 1385 | * @irq: Interrupt line to free |
1391 | * @act: irqaction for the interrupt | 1386 | * @act: irqaction for the interrupt |
1392 | * | 1387 | * |
1393 | * Used to remove interrupts statically setup by the early boot process. | 1388 | * Used to remove interrupts statically setup by the early boot process. |
1394 | */ | 1389 | */ |
1395 | void remove_irq(unsigned int irq, struct irqaction *act) | 1390 | void remove_irq(unsigned int irq, struct irqaction *act) |
1396 | { | 1391 | { |
1397 | struct irq_desc *desc = irq_to_desc(irq); | 1392 | struct irq_desc *desc = irq_to_desc(irq); |
1398 | 1393 | ||
1399 | if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 1394 | if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
1400 | __free_irq(irq, act->dev_id); | 1395 | __free_irq(irq, act->dev_id); |
1401 | } | 1396 | } |
1402 | EXPORT_SYMBOL_GPL(remove_irq); | 1397 | EXPORT_SYMBOL_GPL(remove_irq); |
1403 | 1398 | ||
1404 | /** | 1399 | /** |
1405 | * free_irq - free an interrupt allocated with request_irq | 1400 | * free_irq - free an interrupt allocated with request_irq |
1406 | * @irq: Interrupt line to free | 1401 | * @irq: Interrupt line to free |
1407 | * @dev_id: Device identity to free | 1402 | * @dev_id: Device identity to free |
1408 | * | 1403 | * |
1409 | * Remove an interrupt handler. The handler is removed and if the | 1404 | * Remove an interrupt handler. The handler is removed and if the |
1410 | * interrupt line is no longer in use by any driver it is disabled. | 1405 | * interrupt line is no longer in use by any driver it is disabled. |
1411 | * On a shared IRQ the caller must ensure the interrupt is disabled | 1406 | * On a shared IRQ the caller must ensure the interrupt is disabled |
1412 | * on the card it drives before calling this function. The function | 1407 | * on the card it drives before calling this function. The function |
1413 | * does not return until any executing interrupts for this IRQ | 1408 | * does not return until any executing interrupts for this IRQ |
1414 | * have completed. | 1409 | * have completed. |
1415 | * | 1410 | * |
1416 | * This function must not be called from interrupt context. | 1411 | * This function must not be called from interrupt context. |
1417 | */ | 1412 | */ |
1418 | void free_irq(unsigned int irq, void *dev_id) | 1413 | void free_irq(unsigned int irq, void *dev_id) |
1419 | { | 1414 | { |
1420 | struct irq_desc *desc = irq_to_desc(irq); | 1415 | struct irq_desc *desc = irq_to_desc(irq); |
1421 | 1416 | ||
1422 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 1417 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
1423 | return; | 1418 | return; |
1424 | 1419 | ||
1425 | #ifdef CONFIG_SMP | 1420 | #ifdef CONFIG_SMP |
1426 | if (WARN_ON(desc->affinity_notify)) | 1421 | if (WARN_ON(desc->affinity_notify)) |
1427 | desc->affinity_notify = NULL; | 1422 | desc->affinity_notify = NULL; |
1428 | #endif | 1423 | #endif |
1429 | 1424 | ||
1430 | chip_bus_lock(desc); | 1425 | chip_bus_lock(desc); |
1431 | kfree(__free_irq(irq, dev_id)); | 1426 | kfree(__free_irq(irq, dev_id)); |
1432 | chip_bus_sync_unlock(desc); | 1427 | chip_bus_sync_unlock(desc); |
1433 | } | 1428 | } |
1434 | EXPORT_SYMBOL(free_irq); | 1429 | EXPORT_SYMBOL(free_irq); |
1435 | 1430 | ||
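A hedged teardown sketch for the shared-IRQ rule spelled out in the kernel-doc above: on a shared line the device must be silenced before free_irq(), because other actions keep the line live until the last one is removed. struct mydev and its register offsets are hypothetical:

static void mydev_teardown(struct mydev *md)
{
	/* shared line: mask the interrupt at the device first */
	writel(0, md->regs + MYDEV_IRQ_ENABLE);

	/* waits until all running handlers for this IRQ have finished */
	free_irq(md->irq, md);
}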
1436 | /** | 1431 | /** |
1437 | * request_threaded_irq - allocate an interrupt line | 1432 | * request_threaded_irq - allocate an interrupt line |
1438 | * @irq: Interrupt line to allocate | 1433 | * @irq: Interrupt line to allocate |
1439 | * @handler: Function to be called when the IRQ occurs. | 1434 | * @handler: Function to be called when the IRQ occurs. |
1440 | * Primary handler for threaded interrupts | 1435 | * Primary handler for threaded interrupts |
1441 | * If NULL and thread_fn != NULL the default | 1436 | * If NULL and thread_fn != NULL the default |
1442 | * primary handler is installed | 1437 | * primary handler is installed |
1443 | * @thread_fn: Function called from the irq handler thread | 1438 | * @thread_fn: Function called from the irq handler thread |
1444 | * If NULL, no irq thread is created | 1439 | * If NULL, no irq thread is created |
1445 | * @irqflags: Interrupt type flags | 1440 | * @irqflags: Interrupt type flags |
1446 | * @devname: An ascii name for the claiming device | 1441 | * @devname: An ascii name for the claiming device |
1447 | * @dev_id: A cookie passed back to the handler function | 1442 | * @dev_id: A cookie passed back to the handler function |
1448 | * | 1443 | * |
1449 | * This call allocates interrupt resources and enables the | 1444 | * This call allocates interrupt resources and enables the |
1450 | * interrupt line and IRQ handling. From the point this | 1445 | * interrupt line and IRQ handling. From the point this |
1451 | * call is made your handler function may be invoked. Since | 1446 | * call is made your handler function may be invoked. Since |
1452 | * your handler function must clear any interrupt the board | 1447 | * your handler function must clear any interrupt the board |
1453 | * raises, you must take care both to initialise your hardware | 1448 | * raises, you must take care both to initialise your hardware |
1454 | * and to set up the interrupt handler in the right order. | 1449 | * and to set up the interrupt handler in the right order. |
1455 | * | 1450 | * |
1456 | * If you want to set up a threaded irq handler for your device | 1451 | * If you want to set up a threaded irq handler for your device |
1457 | * then you need to supply @handler and @thread_fn. @handler is | 1452 | * then you need to supply @handler and @thread_fn. @handler is |
1458 | * still called in hard interrupt context and has to check | 1453 | * still called in hard interrupt context and has to check |
1459 | * whether the interrupt originates from the device. If yes it | 1454 | * whether the interrupt originates from the device. If yes it |
1460 | * needs to disable the interrupt on the device and return | 1455 | * needs to disable the interrupt on the device and return |
1461 | * IRQ_WAKE_THREAD which will wake up the handler thread and run | 1456 | * IRQ_WAKE_THREAD which will wake up the handler thread and run |
1462 | * @thread_fn. This split handler design is necessary to support | 1457 | * @thread_fn. This split handler design is necessary to support |
1463 | * shared interrupts. | 1458 | * shared interrupts. |
1464 | * | 1459 | * |
1465 | * Dev_id must be globally unique. Normally the address of the | 1460 | * Dev_id must be globally unique. Normally the address of the |
1466 | * device data structure is used as the cookie. Since the handler | 1461 | * device data structure is used as the cookie. Since the handler |
1467 | * receives this value it makes sense to use it. | 1462 | * receives this value it makes sense to use it. |
1468 | * | 1463 | * |
1469 | * If your interrupt is shared you must pass a non NULL dev_id | 1464 | * If your interrupt is shared you must pass a non NULL dev_id |
1470 | * as this is required when freeing the interrupt. | 1465 | * as this is required when freeing the interrupt. |
1471 | * | 1466 | * |
1472 | * Flags: | 1467 | * Flags: |
1473 | * | 1468 | * |
1474 | * IRQF_SHARED Interrupt is shared | 1469 | * IRQF_SHARED Interrupt is shared |
1475 | * IRQF_TRIGGER_* Specify active edge(s) or level | 1470 | * IRQF_TRIGGER_* Specify active edge(s) or level |
1476 | * | 1471 | * |
1477 | */ | 1472 | */ |
1478 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, | 1473 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, |
1479 | irq_handler_t thread_fn, unsigned long irqflags, | 1474 | irq_handler_t thread_fn, unsigned long irqflags, |
1480 | const char *devname, void *dev_id) | 1475 | const char *devname, void *dev_id) |
1481 | { | 1476 | { |
1482 | struct irqaction *action; | 1477 | struct irqaction *action; |
1483 | struct irq_desc *desc; | 1478 | struct irq_desc *desc; |
1484 | int retval; | 1479 | int retval; |
1485 | 1480 | ||
1486 | /* | 1481 | /* |
1487 | * Sanity-check: shared interrupts must pass in a real dev-ID, | 1482 | * Sanity-check: shared interrupts must pass in a real dev-ID, |
1488 | * otherwise we'll have trouble later trying to figure out | 1483 | * otherwise we'll have trouble later trying to figure out |
1489 | * which interrupt is which (messes up the interrupt freeing | 1484 | * which interrupt is which (messes up the interrupt freeing |
1490 | * logic etc). | 1485 | * logic etc). |
1491 | */ | 1486 | */ |
1492 | if ((irqflags & IRQF_SHARED) && !dev_id) | 1487 | if ((irqflags & IRQF_SHARED) && !dev_id) |
1493 | return -EINVAL; | 1488 | return -EINVAL; |
1494 | 1489 | ||
1495 | desc = irq_to_desc(irq); | 1490 | desc = irq_to_desc(irq); |
1496 | if (!desc) | 1491 | if (!desc) |
1497 | return -EINVAL; | 1492 | return -EINVAL; |
1498 | 1493 | ||
1499 | if (!irq_settings_can_request(desc) || | 1494 | if (!irq_settings_can_request(desc) || |
1500 | WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 1495 | WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
1501 | return -EINVAL; | 1496 | return -EINVAL; |
1502 | 1497 | ||
1503 | if (!handler) { | 1498 | if (!handler) { |
1504 | if (!thread_fn) | 1499 | if (!thread_fn) |
1505 | return -EINVAL; | 1500 | return -EINVAL; |
1506 | handler = irq_default_primary_handler; | 1501 | handler = irq_default_primary_handler; |
1507 | } | 1502 | } |
1508 | 1503 | ||
1509 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | 1504 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
1510 | if (!action) | 1505 | if (!action) |
1511 | return -ENOMEM; | 1506 | return -ENOMEM; |
1512 | 1507 | ||
1513 | action->handler = handler; | 1508 | action->handler = handler; |
1514 | action->thread_fn = thread_fn; | 1509 | action->thread_fn = thread_fn; |
1515 | action->flags = irqflags; | 1510 | action->flags = irqflags; |
1516 | action->name = devname; | 1511 | action->name = devname; |
1517 | action->dev_id = dev_id; | 1512 | action->dev_id = dev_id; |
1518 | 1513 | ||
1519 | chip_bus_lock(desc); | 1514 | chip_bus_lock(desc); |
1520 | retval = __setup_irq(irq, desc, action); | 1515 | retval = __setup_irq(irq, desc, action); |
1521 | chip_bus_sync_unlock(desc); | 1516 | chip_bus_sync_unlock(desc); |
1522 | 1517 | ||
1523 | if (retval) | 1518 | if (retval) |
1524 | kfree(action); | 1519 | kfree(action); |
1525 | 1520 | ||
1526 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME | 1521 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
1527 | if (!retval && (irqflags & IRQF_SHARED)) { | 1522 | if (!retval && (irqflags & IRQF_SHARED)) { |
1528 | /* | 1523 | /* |
1529 | * It's a shared IRQ -- the driver ought to be prepared for it | 1524 | * It's a shared IRQ -- the driver ought to be prepared for it |
1530 | * to happen immediately, so let's make sure.... | 1525 | * to happen immediately, so let's make sure.... |
1531 | * We disable the irq to make sure that a 'real' IRQ doesn't | 1526 | * We disable the irq to make sure that a 'real' IRQ doesn't |
1532 | * run in parallel with our fake. | 1527 | * run in parallel with our fake. |
1533 | */ | 1528 | */ |
1534 | unsigned long flags; | 1529 | unsigned long flags; |
1535 | 1530 | ||
1536 | disable_irq(irq); | 1531 | disable_irq(irq); |
1537 | local_irq_save(flags); | 1532 | local_irq_save(flags); |
1538 | 1533 | ||
1539 | handler(irq, dev_id); | 1534 | handler(irq, dev_id); |
1540 | 1535 | ||
1541 | local_irq_restore(flags); | 1536 | local_irq_restore(flags); |
1542 | enable_irq(irq); | 1537 | enable_irq(irq); |
1543 | } | 1538 | } |
1544 | #endif | 1539 | #endif |
1545 | return retval; | 1540 | return retval; |
1546 | } | 1541 | } |
1547 | EXPORT_SYMBOL(request_threaded_irq); | 1542 | EXPORT_SYMBOL(request_threaded_irq); |
1548 | 1543 | ||
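A sketch of the split handler design described above, with hypothetical struct mydev, register offsets and helpers. The primary handler runs in hard interrupt context, checks whether the device actually raised the interrupt, quiesces it, and defers the real work to the thread:

static irqreturn_t mydev_hardirq(int irq, void *dev_id)
{
	struct mydev *md = dev_id;

	if (!(readl(md->regs + MYDEV_STATUS) & MYDEV_IRQ_PENDING))
		return IRQ_NONE;		/* not ours on a shared line */

	writel(0, md->regs + MYDEV_IRQ_ENABLE);	/* silence the device */
	return IRQ_WAKE_THREAD;			/* wake mydev_thread() */
}

static irqreturn_t mydev_thread(int irq, void *dev_id)
{
	struct mydev *md = dev_id;

	mydev_process_events(md);		/* may sleep here */
	writel(1, md->regs + MYDEV_IRQ_ENABLE);	/* re-arm the device */
	return IRQ_HANDLED;
}

static int mydev_setup_irq(struct mydev *md)
{
	return request_threaded_irq(md->irq, mydev_hardirq, mydev_thread,
				    IRQF_SHARED, "mydev", md);
}

Passing handler = NULL instead installs irq_default_primary_handler, which is why __setup_irq() above then insists on IRQF_ONESHOT unless the chip is IRQCHIP_ONESHOT_SAFE.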
1549 | /** | 1544 | /** |
1550 | * request_any_context_irq - allocate an interrupt line | 1545 | * request_any_context_irq - allocate an interrupt line |
1551 | * @irq: Interrupt line to allocate | 1546 | * @irq: Interrupt line to allocate |
1552 | * @handler: Function to be called when the IRQ occurs. | 1547 | * @handler: Function to be called when the IRQ occurs. |
1553 | * Threaded handler for threaded interrupts. | 1548 | * Threaded handler for threaded interrupts. |
1554 | * @flags: Interrupt type flags | 1549 | * @flags: Interrupt type flags |
1555 | * @name: An ascii name for the claiming device | 1550 | * @name: An ascii name for the claiming device |
1556 | * @dev_id: A cookie passed back to the handler function | 1551 | * @dev_id: A cookie passed back to the handler function |
1557 | * | 1552 | * |
1558 | * This call allocates interrupt resources and enables the | 1553 | * This call allocates interrupt resources and enables the |
1559 | * interrupt line and IRQ handling. It selects either a | 1554 | * interrupt line and IRQ handling. It selects either a |
1560 | * hardirq or threaded handling method depending on the | 1555 | * hardirq or threaded handling method depending on the |
1561 | * context. | 1556 | * context. |
1562 | * | 1557 | * |
1563 | * On failure, it returns a negative value. On success, | 1558 | * On failure, it returns a negative value. On success, |
1564 | * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. | 1559 | * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. |
1565 | */ | 1560 | */ |
1566 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, | 1561 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, |
1567 | unsigned long flags, const char *name, void *dev_id) | 1562 | unsigned long flags, const char *name, void *dev_id) |
1568 | { | 1563 | { |
1569 | struct irq_desc *desc = irq_to_desc(irq); | 1564 | struct irq_desc *desc = irq_to_desc(irq); |
1570 | int ret; | 1565 | int ret; |
1571 | 1566 | ||
1572 | if (!desc) | 1567 | if (!desc) |
1573 | return -EINVAL; | 1568 | return -EINVAL; |
1574 | 1569 | ||
1575 | if (irq_settings_is_nested_thread(desc)) { | 1570 | if (irq_settings_is_nested_thread(desc)) { |
1576 | ret = request_threaded_irq(irq, NULL, handler, | 1571 | ret = request_threaded_irq(irq, NULL, handler, |
1577 | flags, name, dev_id); | 1572 | flags, name, dev_id); |
1578 | return !ret ? IRQC_IS_NESTED : ret; | 1573 | return !ret ? IRQC_IS_NESTED : ret; |
1579 | } | 1574 | } |
1580 | 1575 | ||
1581 | ret = request_irq(irq, handler, flags, name, dev_id); | 1576 | ret = request_irq(irq, handler, flags, name, dev_id); |
1582 | return !ret ? IRQC_IS_HARDIRQ : ret; | 1577 | return !ret ? IRQC_IS_HARDIRQ : ret; |
1583 | } | 1578 | } |
1584 | EXPORT_SYMBOL_GPL(request_any_context_irq); | 1579 | EXPORT_SYMBOL_GPL(request_any_context_irq); |
1585 | 1580 | ||
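Callers that may sit behind a nested irq chip (for instance an interrupt demultiplexed by an I2C GPIO expander) cannot know up front whether they get a hardirq or a nested thread; the return value tells them. A hedged sketch with hypothetical names:

static int mydev_probe_irq(struct mydev *md)
{
	int ret;

	ret = request_any_context_irq(md->irq, mydev_isr, 0, "mydev", md);
	if (ret < 0)
		return ret;

	/* success returns IRQC_IS_HARDIRQ or IRQC_IS_NESTED, not 0 */
	md->irq_is_nested = (ret == IRQC_IS_NESTED);
	return 0;
}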
1586 | void enable_percpu_irq(unsigned int irq, unsigned int type) | 1581 | void enable_percpu_irq(unsigned int irq, unsigned int type) |
1587 | { | 1582 | { |
1588 | unsigned int cpu = smp_processor_id(); | 1583 | unsigned int cpu = smp_processor_id(); |
1589 | unsigned long flags; | 1584 | unsigned long flags; |
1590 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | 1585 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); |
1591 | 1586 | ||
1592 | if (!desc) | 1587 | if (!desc) |
1593 | return; | 1588 | return; |
1594 | 1589 | ||
1595 | type &= IRQ_TYPE_SENSE_MASK; | 1590 | type &= IRQ_TYPE_SENSE_MASK; |
1596 | if (type != IRQ_TYPE_NONE) { | 1591 | if (type != IRQ_TYPE_NONE) { |
1597 | int ret; | 1592 | int ret; |
1598 | 1593 | ||
1599 | ret = __irq_set_trigger(desc, irq, type); | 1594 | ret = __irq_set_trigger(desc, irq, type); |
1600 | 1595 | ||
1601 | if (ret) { | 1596 | if (ret) { |
1602 | WARN(1, "failed to set type for IRQ%d\n", irq); | 1597 | WARN(1, "failed to set type for IRQ%d\n", irq); |
1603 | goto out; | 1598 | goto out; |
1604 | } | 1599 | } |
1605 | } | 1600 | } |
1606 | 1601 | ||
1607 | irq_percpu_enable(desc, cpu); | 1602 | irq_percpu_enable(desc, cpu); |
1608 | out: | 1603 | out: |
1609 | irq_put_desc_unlock(desc, flags); | 1604 | irq_put_desc_unlock(desc, flags); |
1610 | } | 1605 | } |
1611 | EXPORT_SYMBOL_GPL(enable_percpu_irq); | 1606 | EXPORT_SYMBOL_GPL(enable_percpu_irq); |
1612 | 1607 | ||
1613 | void disable_percpu_irq(unsigned int irq) | 1608 | void disable_percpu_irq(unsigned int irq) |
1614 | { | 1609 | { |
1615 | unsigned int cpu = smp_processor_id(); | 1610 | unsigned int cpu = smp_processor_id(); |
1616 | unsigned long flags; | 1611 | unsigned long flags; |
1617 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); | 1612 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); |
1618 | 1613 | ||
1619 | if (!desc) | 1614 | if (!desc) |
1620 | return; | 1615 | return; |
1621 | 1616 | ||
1622 | irq_percpu_disable(desc, cpu); | 1617 | irq_percpu_disable(desc, cpu); |
1623 | irq_put_desc_unlock(desc, flags); | 1618 | irq_put_desc_unlock(desc, flags); |
1624 | } | 1619 | } |
1625 | EXPORT_SYMBOL_GPL(disable_percpu_irq); | 1620 | EXPORT_SYMBOL_GPL(disable_percpu_irq); |
1626 | 1621 | ||
1627 | /* | 1622 | /* |
1628 | * Internal function to unregister a percpu irqaction. | 1623 | * Internal function to unregister a percpu irqaction. |
1629 | */ | 1624 | */ |
1630 | static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) | 1625 | static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) |
1631 | { | 1626 | { |
1632 | struct irq_desc *desc = irq_to_desc(irq); | 1627 | struct irq_desc *desc = irq_to_desc(irq); |
1633 | struct irqaction *action; | 1628 | struct irqaction *action; |
1634 | unsigned long flags; | 1629 | unsigned long flags; |
1635 | 1630 | ||
1636 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | 1631 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
1637 | 1632 | ||
1638 | if (!desc) | 1633 | if (!desc) |
1639 | return NULL; | 1634 | return NULL; |
1640 | 1635 | ||
1641 | raw_spin_lock_irqsave(&desc->lock, flags); | 1636 | raw_spin_lock_irqsave(&desc->lock, flags); |
1642 | 1637 | ||
1643 | action = desc->action; | 1638 | action = desc->action; |
1644 | if (!action || action->percpu_dev_id != dev_id) { | 1639 | if (!action || action->percpu_dev_id != dev_id) { |
1645 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | 1640 | WARN(1, "Trying to free already-free IRQ %d\n", irq); |
1646 | goto bad; | 1641 | goto bad; |
1647 | } | 1642 | } |
1648 | 1643 | ||
1649 | if (!cpumask_empty(desc->percpu_enabled)) { | 1644 | if (!cpumask_empty(desc->percpu_enabled)) { |
1650 | WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", | 1645 | WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", |
1651 | irq, cpumask_first(desc->percpu_enabled)); | 1646 | irq, cpumask_first(desc->percpu_enabled)); |
1652 | goto bad; | 1647 | goto bad; |
1653 | } | 1648 | } |
1654 | 1649 | ||
1655 | /* Found it - now remove it from the list of entries: */ | 1650 | /* Found it - now remove it from the list of entries: */ |
1656 | desc->action = NULL; | 1651 | desc->action = NULL; |
1657 | 1652 | ||
1658 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1653 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1659 | 1654 | ||
1660 | unregister_handler_proc(irq, action); | 1655 | unregister_handler_proc(irq, action); |
1661 | 1656 | ||
1662 | module_put(desc->owner); | 1657 | module_put(desc->owner); |
1663 | return action; | 1658 | return action; |
1664 | 1659 | ||
1665 | bad: | 1660 | bad: |
1666 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1661 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1667 | return NULL; | 1662 | return NULL; |
1668 | } | 1663 | } |
1669 | 1664 | ||
1670 | /** | 1665 | /** |
1671 | * remove_percpu_irq - free a per-cpu interrupt | 1666 | * remove_percpu_irq - free a per-cpu interrupt |
1672 | * @irq: Interrupt line to free | 1667 | * @irq: Interrupt line to free |
1673 | * @act: irqaction for the interrupt | 1668 | * @act: irqaction for the interrupt |
1674 | * | 1669 | * |
1675 | * Used to remove interrupts statically setup by the early boot process. | 1670 | * Used to remove interrupts statically setup by the early boot process. |
1676 | */ | 1671 | */ |
1677 | void remove_percpu_irq(unsigned int irq, struct irqaction *act) | 1672 | void remove_percpu_irq(unsigned int irq, struct irqaction *act) |
1678 | { | 1673 | { |
1679 | struct irq_desc *desc = irq_to_desc(irq); | 1674 | struct irq_desc *desc = irq_to_desc(irq); |
1680 | 1675 | ||
1681 | if (desc && irq_settings_is_per_cpu_devid(desc)) | 1676 | if (desc && irq_settings_is_per_cpu_devid(desc)) |
1682 | __free_percpu_irq(irq, act->percpu_dev_id); | 1677 | __free_percpu_irq(irq, act->percpu_dev_id); |
1683 | } | 1678 | } |
1684 | 1679 | ||
1685 | /** | 1680 | /** |
1686 | * free_percpu_irq - free an interrupt allocated with request_percpu_irq | 1681 | * free_percpu_irq - free an interrupt allocated with request_percpu_irq |
1687 | * @irq: Interrupt line to free | 1682 | * @irq: Interrupt line to free |
1688 | * @dev_id: Device identity to free | 1683 | * @dev_id: Device identity to free |
1689 | * | 1684 | * |
1690 | * Remove a percpu interrupt handler. The handler is removed, but | 1685 | * Remove a percpu interrupt handler. The handler is removed, but |
1691 | * the interrupt line is not disabled. This must be done on each | 1686 | * the interrupt line is not disabled. This must be done on each |
1692 | * CPU before calling this function. The function does not return | 1687 | * CPU before calling this function. The function does not return |
1693 | * until any executing interrupts for this IRQ have completed. | 1688 | * until any executing interrupts for this IRQ have completed. |
1694 | * | 1689 | * |
1695 | * This function must not be called from interrupt context. | 1690 | * This function must not be called from interrupt context. |
1696 | */ | 1691 | */ |
1697 | void free_percpu_irq(unsigned int irq, void __percpu *dev_id) | 1692 | void free_percpu_irq(unsigned int irq, void __percpu *dev_id) |
1698 | { | 1693 | { |
1699 | struct irq_desc *desc = irq_to_desc(irq); | 1694 | struct irq_desc *desc = irq_to_desc(irq); |
1700 | 1695 | ||
1701 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | 1696 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) |
1702 | return; | 1697 | return; |
1703 | 1698 | ||
1704 | chip_bus_lock(desc); | 1699 | chip_bus_lock(desc); |
1705 | kfree(__free_percpu_irq(irq, dev_id)); | 1700 | kfree(__free_percpu_irq(irq, dev_id)); |
1706 | chip_bus_sync_unlock(desc); | 1701 | chip_bus_sync_unlock(desc); |
1707 | } | 1702 | } |
1708 | 1703 | ||
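The converse of the bringup requirement: the line must be disabled on every CPU before it is freed, or __free_percpu_irq() warns and refuses. A hedged sketch, reusing the hypothetical mytimer names introduced in the sketch after request_percpu_irq() at the end of this file:

static void mytimer_disable_this_cpu(void *unused)
{
	disable_percpu_irq(mytimer_irq);
}

static void mytimer_exit(void)
{
	/* disable on every CPU first, then drop the action */
	on_each_cpu(mytimer_disable_this_cpu, NULL, 1);
	free_percpu_irq(mytimer_irq, &mytimer);
}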
1709 | /** | 1704 | /** |
1710 | * setup_percpu_irq - setup a per-cpu interrupt | 1705 | * setup_percpu_irq - setup a per-cpu interrupt |
1711 | * @irq: Interrupt line to setup | 1706 | * @irq: Interrupt line to setup |
1712 | * @act: irqaction for the interrupt | 1707 | * @act: irqaction for the interrupt |
1713 | * | 1708 | * |
1714 | * Used to statically setup per-cpu interrupts in the early boot process. | 1709 | * Used to statically setup per-cpu interrupts in the early boot process. |
1715 | */ | 1710 | */ |
1716 | int setup_percpu_irq(unsigned int irq, struct irqaction *act) | 1711 | int setup_percpu_irq(unsigned int irq, struct irqaction *act) |
1717 | { | 1712 | { |
1718 | struct irq_desc *desc = irq_to_desc(irq); | 1713 | struct irq_desc *desc = irq_to_desc(irq); |
1719 | int retval; | 1714 | int retval; |
1720 | 1715 | ||
1721 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | 1716 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) |
1722 | return -EINVAL; | 1717 | return -EINVAL; |
1723 | chip_bus_lock(desc); | 1718 | chip_bus_lock(desc); |
1724 | retval = __setup_irq(irq, desc, act); | 1719 | retval = __setup_irq(irq, desc, act); |
1725 | chip_bus_sync_unlock(desc); | 1720 | chip_bus_sync_unlock(desc); |
1726 | 1721 | ||
1727 | return retval; | 1722 | return retval; |
1728 | } | 1723 | } |
1729 | 1724 | ||
1730 | /** | 1725 | /** |
1731 | * request_percpu_irq - allocate a percpu interrupt line | 1726 | * request_percpu_irq - allocate a percpu interrupt line |
1732 | * @irq: Interrupt line to allocate | 1727 | * @irq: Interrupt line to allocate |
1733 | * @handler: Function to be called when the IRQ occurs. | 1728 | * @handler: Function to be called when the IRQ occurs. |
1734 | * @devname: An ascii name for the claiming device | 1729 | * @devname: An ascii name for the claiming device |
1735 | * @dev_id: A percpu cookie passed back to the handler function | 1730 | * @dev_id: A percpu cookie passed back to the handler function |
1736 | * | 1731 | * |
1737 | * This call allocates interrupt resources, but doesn't | 1732 | * This call allocates interrupt resources, but doesn't |
1738 | * automatically enable the interrupt. It has to be done on each | 1733 | * automatically enable the interrupt. It has to be done on each |
1739 | * CPU using enable_percpu_irq(). | 1734 | * CPU using enable_percpu_irq(). |
1740 | * | 1735 | * |
1741 | * Dev_id must be globally unique. It is a per-cpu variable, and | 1736 | * Dev_id must be globally unique. It is a per-cpu variable, and |
1742 | * the handler gets called with the interrupted CPU's instance of | 1737 | * the handler gets called with the interrupted CPU's instance of |
1743 | * that variable. | 1738 | * that variable. |
1744 | */ | 1739 | */ |
1745 | int request_percpu_irq(unsigned int irq, irq_handler_t handler, | 1740 | int request_percpu_irq(unsigned int irq, irq_handler_t handler, |
1746 | const char *devname, void __percpu *dev_id) | 1741 | const char *devname, void __percpu *dev_id) |
1747 | { | 1742 | { |
1748 | struct irqaction *action; | 1743 | struct irqaction *action; |
1749 | struct irq_desc *desc; | 1744 | struct irq_desc *desc; |
1750 | int retval; | 1745 | int retval; |
1751 | 1746 | ||
1752 | if (!dev_id) | 1747 | if (!dev_id) |
1753 | return -EINVAL; | 1748 | return -EINVAL; |
1754 | 1749 | ||
1755 | desc = irq_to_desc(irq); | 1750 | desc = irq_to_desc(irq); |
1756 | if (!desc || !irq_settings_can_request(desc) || | 1751 | if (!desc || !irq_settings_can_request(desc) || |
1757 | !irq_settings_is_per_cpu_devid(desc)) | 1752 | !irq_settings_is_per_cpu_devid(desc)) |
1758 | return -EINVAL; | 1753 | return -EINVAL; |
1759 | 1754 | ||
1760 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | 1755 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
1761 | if (!action) | 1756 | if (!action) |
1762 | return -ENOMEM; | 1757 | return -ENOMEM; |
1763 | 1758 | ||
1764 | action->handler = handler; | 1759 | action->handler = handler; |
1765 | action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND; | 1760 | action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND; |
1766 | action->name = devname; | 1761 | action->name = devname; |
1767 | action->percpu_dev_id = dev_id; | 1762 | action->percpu_dev_id = dev_id; |
1768 | 1763 | ||
1769 | chip_bus_lock(desc); | 1764 | chip_bus_lock(desc); |
1770 | retval = __setup_irq(irq, desc, action); | 1765 | retval = __setup_irq(irq, desc, action); |
1771 | chip_bus_sync_unlock(desc); | 1766 | chip_bus_sync_unlock(desc); |
1772 | 1767 | ||
1773 | if (retval) | 1768 | if (retval) |
1774 | kfree(action); | 1769 | kfree(action); |
1775 | 1770 | ||
1776 | return retval; | 1771 | return retval; |
1777 | } | 1772 | } |
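Tying the per-cpu API together: dev_id is a per-cpu variable, each CPU's handler invocation sees its own instance, and since the line is not auto-enabled it has to be switched on from every CPU, typically from CPU bringup code. A sketch with hypothetical names; the irq number would normally come from the device tree:

static DEFINE_PER_CPU(struct mytimer, mytimer);
static int mytimer_irq;

static irqreturn_t mytimer_isr(int irq, void *dev_id)
{
	struct mytimer *t = dev_id;	/* the interrupted CPU's instance */

	/* ack and reprogram this CPU's local timer */
	return IRQ_HANDLED;
}

static void mytimer_enable_this_cpu(void *unused)
{
	enable_percpu_irq(mytimer_irq, IRQ_TYPE_NONE);
}

static int __init mytimer_init(void)
{
	int ret;

	ret = request_percpu_irq(mytimer_irq, mytimer_isr,
				 "mytimer", &mytimer);
	if (ret)
		return ret;

	/* not auto-enabled: switch the line on from every CPU */
	on_each_cpu(mytimer_enable_this_cpu, NULL, 1);
	return 0;
}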