Commit cc8c3b78433222e5dbc1fdfcfdde29e1743f181a

Authored by Thomas Gleixner
1 parent 0b1adaa031

genirq: Protect access to irq_desc->action in can_request_irq()

can_request_irq() accesses and dereferences irq_desc->action without
holding irq_desc->lock, so the action can be freed on another CPU
before it is dereferenced. Unlikely, but possible.

Protect it with desc->lock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
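
The window the patch closes is easiest to see in miniature outside the
kernel. Below is a minimal userspace sketch of the same pattern, assuming
a pthread mutex as a stand-in for desc->lock; the fake_* names and
FAKE_IRQF_SHARED are invented for illustration and are not kernel
interfaces. The point is that the reader takes the same lock the freeing
path holds, so the action can no longer disappear between the load and
the dereference.

/*
 * Hypothetical userspace analogue of the race fixed here.
 * Build with: cc -pthread race-sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_IRQF_SHARED 0x80	/* stand-in for IRQF_SHARED */

struct fake_action {
	unsigned long flags;
};

static pthread_mutex_t fake_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_action *fake_action;	/* plays the role of desc->action */

/* Patched pattern: load and dereference the action only under the lock. */
static int fake_can_request(unsigned long irqflags)
{
	struct fake_action *action;

	pthread_mutex_lock(&fake_lock);
	action = fake_action;
	if (action)
		if (irqflags & action->flags & FAKE_IRQF_SHARED)
			action = NULL;
	pthread_mutex_unlock(&fake_lock);

	return !action;
}

/* Plays the role of __free_irq() running on another CPU. */
static void *freer(void *unused)
{
	pthread_mutex_lock(&fake_lock);
	free(fake_action);
	fake_action = NULL;
	pthread_mutex_unlock(&fake_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	fake_action = calloc(1, sizeof(*fake_action));
	fake_action->flags = FAKE_IRQF_SHARED;

	pthread_create(&t, NULL, freer, NULL);
	/*
	 * Without the lock in fake_can_request(), the load of
	 * fake_action and the read of action->flags could hit
	 * memory the freer thread has already released.
	 */
	printf("can request: %d\n", fake_can_request(FAKE_IRQF_SHARED));
	pthread_join(t, NULL);
	return 0;
}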

Showing 1 changed file with 4 additions and 0 deletions: kernel/irq/manage.c

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1,1109 +1,1113 @@
 /*
  * linux/kernel/irq/manage.c
  *
  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
  * Copyright (C) 2005-2006 Thomas Gleixner
  *
  * This file contains driver APIs to the irq subsystem.
  */
 
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 
 #include "internals.h"
 
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
  *
  * This function waits for any pending IRQ handlers for this interrupt
  * to complete before returning. If you use this function while
  * holding a resource the IRQ handler may need you will deadlock.
  *
  * This function may be called - with care - from IRQ context.
  */
 void synchronize_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned int status;
 
 	if (!desc)
 		return;
 
 	do {
 		unsigned long flags;
 
 		/*
 		 * Wait until we're out of the critical section. This might
 		 * give the wrong answer due to the lack of memory barriers.
 		 */
 		while (desc->status & IRQ_INPROGRESS)
 			cpu_relax();
 
 		/* Ok, that indicated we're done: double-check carefully. */
 		raw_spin_lock_irqsave(&desc->lock, flags);
 		status = desc->status;
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 		/* Oops, that failed? */
 	} while (status & IRQ_INPROGRESS);
 
 	/*
 	 * We made sure that no hardirq handler is running. Now verify
 	 * that no threaded handlers are active.
 	 */
 	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
 }
 EXPORT_SYMBOL(synchronize_irq);
 
 #ifdef CONFIG_SMP
 cpumask_var_t irq_default_affinity;
 
 /**
  * irq_can_set_affinity - Check if the affinity of a given irq can be set
  * @irq: Interrupt to check
  *
  */
 int irq_can_set_affinity(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
 	    !desc->chip->set_affinity)
 		return 0;
 
 	return 1;
 }
 
 /**
  * irq_set_thread_affinity - Notify irq threads to adjust affinity
  * @desc: irq descriptor which has affitnity changed
  *
  * We just set IRQTF_AFFINITY and delegate the affinity setting
  * to the interrupt thread itself. We can not call
  * set_cpus_allowed_ptr() here as we hold desc->lock and this
  * code can be called from hard interrupt context.
  */
 void irq_set_thread_affinity(struct irq_desc *desc)
 {
 	struct irqaction *action = desc->action;
 
 	while (action) {
 		if (action->thread)
 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 		action = action->next;
 	}
 }
 
 /**
  * irq_set_affinity - Set the irq affinity of a given irq
  * @irq: Interrupt to set affinity
  * @cpumask: cpumask
  *
  */
 int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT) {
 		if (!desc->chip->set_affinity(irq, cpumask)) {
 			cpumask_copy(desc->affinity, cpumask);
 			irq_set_thread_affinity(desc);
 		}
 	}
 	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
 	if (!desc->chip->set_affinity(irq, cpumask)) {
 		cpumask_copy(desc->affinity, cpumask);
 		irq_set_thread_affinity(desc);
 	}
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
  */
 static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
 	/*
 	 * Preserve an userspace affinity setup, but make sure that
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
 		if (cpumask_any_and(desc->affinity, cpu_online_mask)
 		    < nr_cpu_ids)
 			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
 	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
 	desc->chip->set_affinity(irq, desc->affinity);
 
 	return 0;
 }
 #else
 static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 {
 	return irq_select_affinity(irq);
 }
 #endif
 
 /*
  * Called when affinity is set via /proc/irq
  */
 int irq_select_affinity_usr(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
 	if (!ret)
 		irq_set_thread_affinity(desc);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
 }
 
 #else
 static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	return 0;
 }
 #endif
 
 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 {
 	if (suspend) {
 		if (!desc->action || (desc->action->flags & IRQF_TIMER))
 			return;
 		desc->status |= IRQ_SUSPENDED;
 	}
 
 	if (!desc->depth++) {
 		desc->status |= IRQ_DISABLED;
 		desc->chip->disable(irq);
 	}
 }
 
 /**
  * disable_irq_nosync - disable an irq without waiting
  * @irq: Interrupt to disable
  *
  * Disable the selected interrupt line. Disables and Enables are
  * nested.
  * Unlike disable_irq(), this function does not ensure existing
  * instances of the IRQ handler have completed before returning.
  *
  * This function may be called from IRQ context.
  */
 void disable_irq_nosync(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
 	if (!desc)
 		return;
 
 	chip_bus_lock(irq, desc);
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	__disable_irq(desc, irq, false);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
 
 /**
  * disable_irq - disable an irq and wait for completion
  * @irq: Interrupt to disable
  *
  * Disable the selected interrupt line. Enables and Disables are
  * nested.
  * This function waits for any pending IRQ handlers for this interrupt
  * to complete before returning. If you use this function while
  * holding a resource the IRQ handler may need you will deadlock.
  *
  * This function may be called - with care - from IRQ context.
  */
 void disable_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (!desc)
 		return;
 
 	disable_irq_nosync(irq);
 	if (desc->action)
 		synchronize_irq(irq);
 }
 EXPORT_SYMBOL(disable_irq);
 
 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
 	if (resume)
 		desc->status &= ~IRQ_SUSPENDED;
 
 	switch (desc->depth) {
 	case 0:
  err_out:
 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 		break;
 	case 1: {
 		unsigned int status = desc->status & ~IRQ_DISABLED;
 
 		if (desc->status & IRQ_SUSPENDED)
 			goto err_out;
 		/* Prevent probing on this irq: */
 		desc->status = status | IRQ_NOPROBE;
 		check_irq_resend(desc, irq);
 		/* fall-through */
 	}
 	default:
 		desc->depth--;
 	}
 }
 
 /**
  * enable_irq - enable handling of an irq
  * @irq: Interrupt to enable
  *
  * Undoes the effect of one call to disable_irq(). If this
  * matches the last disable, processing of interrupts on this
  * IRQ line is re-enabled.
  *
  * This function may be called from IRQ context only when
  * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
  */
 void enable_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
 	if (!desc)
 		return;
 
 	chip_bus_lock(irq, desc);
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	__enable_irq(desc, irq, false);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(enable_irq);
 
 static int set_irq_wake_real(unsigned int irq, unsigned int on)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	int ret = -ENXIO;
 
 	if (desc->chip->set_wake)
 		ret = desc->chip->set_wake(irq, on);
 
 	return ret;
 }
 
 /**
  * set_irq_wake - control irq power management wakeup
  * @irq: interrupt to control
  * @on: enable/disable power management wakeup
  *
  * Enable/disable power management wakeup mode, which is
  * disabled by default. Enables and disables must match,
  * just as they match for non-wakeup mode support.
  *
  * Wakeup mode lets this IRQ wake the system from sleep
  * states like "suspend to RAM".
  */
 int set_irq_wake(unsigned int irq, unsigned int on)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret = 0;
 
 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
 	 */
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (on) {
 		if (desc->wake_depth++ == 0) {
 			ret = set_irq_wake_real(irq, on);
 			if (ret)
 				desc->wake_depth = 0;
 			else
 				desc->status |= IRQ_WAKEUP;
 		}
 	} else {
 		if (desc->wake_depth == 0) {
 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 		} else if (--desc->wake_depth == 0) {
 			ret = set_irq_wake_real(irq, on);
 			if (ret)
 				desc->wake_depth = 1;
 			else
 				desc->status &= ~IRQ_WAKEUP;
 		}
 	}
 
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(set_irq_wake);
 
 /*
  * Internal function that tells the architecture code whether a
  * particular irq has been exclusively allocated or is available
  * for driver use.
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
+	unsigned long flags;
 
 	if (!desc)
 		return 0;
 
 	if (desc->status & IRQ_NOREQUEST)
 		return 0;
 
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	action = desc->action;
 	if (action)
 		if (irqflags & action->flags & IRQF_SHARED)
 			action = NULL;
+
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return !action;
 }
 
 void compat_irq_chip_set_default_handler(struct irq_desc *desc)
 {
 	/*
 	 * If the architecture still has not overriden
 	 * the flow handler then zap the default. This
 	 * should catch incorrect flow-type setting.
 	 */
 	if (desc->handle_irq == &handle_bad_irq)
 		desc->handle_irq = NULL;
 }
 
 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags)
 {
 	int ret;
 	struct irq_chip *chip = desc->chip;
 
 	if (!chip || !chip->set_type) {
 		/*
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
 		 * flow-types?
 		 */
 		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 				chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
 
 	/* caller masked out all except trigger mode flags */
 	ret = chip->set_type(irq, flags);
 
 	if (ret)
 		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
 		       (int)flags, irq, chip->set_type);
 	else {
 		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
 			flags |= IRQ_LEVEL;
 		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
 		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
 		desc->status |= flags;
 	}
 
 	return ret;
 }
 
 /*
  * Default primary interrupt handler for threaded interrupts. Is
  * assigned as primary handler when request_threaded_irq is called
  * with handler == NULL. Useful for oneshot interrupts.
  */
 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
 {
 	return IRQ_WAKE_THREAD;
 }
 
 /*
  * Primary handler for nested threaded interrupts. Should never be
  * called.
  */
 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 {
 	WARN(1, "Primary handler called for nested irq %d\n", irq);
 	return IRQ_NONE;
 }
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
 				       &action->thread_flags)) {
 			__set_current_state(TASK_RUNNING);
 			return 0;
 		}
 		schedule();
 	}
 	return -1;
 }
 
 /*
  * Oneshot interrupts keep the irq line masked until the threaded
  * handler finished. unmask if the interrupt has not been disabled and
  * is marked MASKED.
  */
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
 again:
 	chip_bus_lock(irq, desc);
 	raw_spin_lock_irq(&desc->lock);
 
 	/*
 	 * Implausible though it may be we need to protect us against
 	 * the following scenario:
 	 *
 	 * The thread is faster done than the hard interrupt handler
 	 * on the other CPU. If we unmask the irq line then the
 	 * interrupt can come in again and masks the line, leaves due
 	 * to IRQ_INPROGRESS and the irq line is masked forever.
 	 */
 	if (unlikely(desc->status & IRQ_INPROGRESS)) {
 		raw_spin_unlock_irq(&desc->lock);
 		chip_bus_sync_unlock(irq, desc);
 		cpu_relax();
 		goto again;
 	}
 
 	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
 		desc->status &= ~IRQ_MASKED;
 		desc->chip->unmask(irq);
 	}
 	raw_spin_unlock_irq(&desc->lock);
 	chip_bus_sync_unlock(irq, desc);
 }
 
 #ifdef CONFIG_SMP
 /*
  * Check whether we need to change the affinity of the interrupt thread.
  */
 static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 {
 	cpumask_var_t mask;
 
 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 		return;
 
 	/*
 	 * In case we are out of memory we set IRQTF_AFFINITY again and
 	 * try again next time
 	 */
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
 		return;
 	}
 
 	raw_spin_lock_irq(&desc->lock);
 	cpumask_copy(mask, desc->affinity);
 	raw_spin_unlock_irq(&desc->lock);
 
 	set_cpus_allowed_ptr(current, mask);
 	free_cpumask_var(mask);
 }
 #else
 static inline void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 #endif
 
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
 	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
 	int wake, oneshot = desc->status & IRQ_ONESHOT;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 	current->irqaction = action;
 
 	while (!irq_wait_for_interrupt(action)) {
 
 		irq_thread_check_affinity(desc, action);
 
 		atomic_inc(&desc->threads_active);
 
 		raw_spin_lock_irq(&desc->lock);
 		if (unlikely(desc->status & IRQ_DISABLED)) {
 			/*
 			 * CHECKME: We might need a dedicated
 			 * IRQ_THREAD_PENDING flag here, which
 			 * retriggers the thread in check_irq_resend()
 			 * but AFAICT IRQ_PENDING should be fine as it
 			 * retriggers the interrupt itself --- tglx
 			 */
 			desc->status |= IRQ_PENDING;
 			raw_spin_unlock_irq(&desc->lock);
 		} else {
 			raw_spin_unlock_irq(&desc->lock);
 
 			action->thread_fn(action->irq, action->dev_id);
 
 			if (oneshot)
 				irq_finalize_oneshot(action->irq, desc);
 		}
 
 		wake = atomic_dec_and_test(&desc->threads_active);
 
 		if (wake && waitqueue_active(&desc->wait_for_threads))
 			wake_up(&desc->wait_for_threads);
 	}
 
 	/*
 	 * Clear irqaction. Otherwise exit_irq_thread() would make
 	 * fuzz about an active irq thread going into nirvana.
 	 */
 	current->irqaction = NULL;
 	return 0;
 }
 
 /*
  * Called from do_exit()
  */
 void exit_irq_thread(void)
 {
 	struct task_struct *tsk = current;
 
 	if (!tsk->irqaction)
 		return;
 
 	printk(KERN_ERR
 	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
 
 	/*
 	 * Set the THREAD DIED flag to prevent further wakeups of the
 	 * soon to be gone threaded handler.
 	 */
 	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
 }
 
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
  */
 static int
 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
 	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
 	unsigned long flags;
 	int nested, shared = 0;
 	int ret;
 
 	if (!desc)
 		return -EINVAL;
 
 	if (desc->chip == &no_irq_chip)
 		return -ENOSYS;
 	/*
 	 * Some drivers like serial.c use request_irq() heavily,
 	 * so we have to be careful not to interfere with a
 	 * running system.
 	 */
 	if (new->flags & IRQF_SAMPLE_RANDOM) {
 		/*
 		 * This function might sleep, we want to call it first,
 		 * outside of the atomic block.
 		 * Yes, this might clear the entropy pool if the wrong
 		 * driver is attempted to be loaded, without actually
 		 * installing a new handler, but is this really a problem,
 		 * only the sysadmin is able to do this.
 		 */
 		rand_initialize_irq(irq);
 	}
 
 	/* Oneshot interrupts are not allowed with shared */
 	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
 		return -EINVAL;
 
 	/*
 	 * Check whether the interrupt nests into another interrupt
 	 * thread.
 	 */
 	nested = desc->status & IRQ_NESTED_THREAD;
 	if (nested) {
 		if (!new->thread_fn)
 			return -EINVAL;
 		/*
 		 * Replace the primary handler which was provided from
 		 * the driver for non nested interrupt handling by the
 		 * dummy function which warns when called.
 		 */
 		new->handler = irq_nested_primary_handler;
 	}
 
 	/*
 	 * Create a handler thread when a thread function is supplied
 	 * and the interrupt does not nest into another interrupt
 	 * thread.
 	 */
 	if (new->thread_fn && !nested) {
 		struct task_struct *t;
 
 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
 				   new->name);
 		if (IS_ERR(t))
 			return PTR_ERR(t);
 		/*
 		 * We keep the reference to the task struct even if
 		 * the thread dies to avoid that the interrupt code
 		 * references an already freed task_struct.
 		 */
 		get_task_struct(t);
 		new->thread = t;
 	}
 
 	/*
 	 * The following block of code has to be executed atomically
 	 */
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	old_ptr = &desc->action;
 	old = *old_ptr;
 	if (old) {
 		/*
 		 * Can't share interrupts unless both agree to and are
 		 * the same type (level, edge, polarity). So both flag
 		 * fields must have IRQF_SHARED set and the bits which
 		 * set the trigger type must match.
 		 */
 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
 		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
 			old_name = old->name;
 			goto mismatch;
 		}
 
 #if defined(CONFIG_IRQ_PER_CPU)
 		/* All handlers must agree on per-cpuness */
 		if ((old->flags & IRQF_PERCPU) !=
 		    (new->flags & IRQF_PERCPU))
 			goto mismatch;
 #endif
 
 		/* add new interrupt at end of irq queue */
 		do {
 			old_ptr = &old->next;
 			old = *old_ptr;
 		} while (old);
 		shared = 1;
 	}
 
 	if (!shared) {
 		irq_chip_set_defaults(desc->chip);
 
 		init_waitqueue_head(&desc->wait_for_threads);
 
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
 			ret = __irq_set_trigger(desc, irq,
 					new->flags & IRQF_TRIGGER_MASK);
 
 			if (ret)
 				goto out_thread;
 		} else
 			compat_irq_chip_set_default_handler(desc);
 #if defined(CONFIG_IRQ_PER_CPU)
 		if (new->flags & IRQF_PERCPU)
 			desc->status |= IRQ_PER_CPU;
 #endif
 
 		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
 				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
 
 		if (new->flags & IRQF_ONESHOT)
 			desc->status |= IRQ_ONESHOT;
 
 		if (!(desc->status & IRQ_NOAUTOEN)) {
 			desc->depth = 0;
 			desc->status &= ~IRQ_DISABLED;
 			desc->chip->startup(irq);
 		} else
 			/* Undo nested disables: */
 			desc->depth = 1;
 
 		/* Exclude IRQ from balancing if requested */
 		if (new->flags & IRQF_NOBALANCING)
 			desc->status |= IRQ_NO_BALANCING;
 
 		/* Set default affinity mask once everything is setup */
 		setup_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
 				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
 		/* hope the handler works with the actual trigger mode... */
 		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
 				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
 				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
 	new->irq = irq;
 	*old_ptr = new;
 
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 
 	/*
 	 * Check whether we disabled the irq via the spurious handler
 	 * before. Reenable it and give it another chance.
 	 */
 	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
 		desc->status &= ~IRQ_SPURIOUS_DISABLED;
 		__enable_irq(desc, irq, false);
 	}
 
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	/*
 	 * Strictly no need to wake it up, but hung_task complains
 	 * when no hard interrupt wakes the thread up.
 	 */
 	if (new->thread)
 		wake_up_process(new->thread);
 
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
 
 	return 0;
 
 mismatch:
 #ifdef CONFIG_DEBUG_SHIRQ
 	if (!(new->flags & IRQF_PROBE_SHARED)) {
 		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
 		if (old_name)
 			printk(KERN_ERR "current handler: %s\n", old_name);
 		dump_stack();
 	}
 #endif
 	ret = -EBUSY;
 
 out_thread:
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	if (new->thread) {
 		struct task_struct *t = new->thread;
 
 		new->thread = NULL;
 		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
 			kthread_stop(t);
 		put_task_struct(t);
 	}
 	return ret;
 }
 
 /**
  * setup_irq - setup an interrupt
  * @irq: Interrupt line to setup
  * @act: irqaction for the interrupt
  *
  * Used to statically setup interrupts in the early boot process.
  */
 int setup_irq(unsigned int irq, struct irqaction *act)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	return __setup_irq(irq, desc, act);
 }
 EXPORT_SYMBOL_GPL(setup_irq);
 
 /*
  * Internal function to unregister an irqaction - used to free
  * regular and special interrupts that are part of the architecture.
  */
 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action, **action_ptr;
 	unsigned long flags;
 
 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
 
 	if (!desc)
 		return NULL;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
 
 	/*
 	 * There can be multiple actions per IRQ descriptor, find the right
 	 * one based on the dev_id:
 	 */
 	action_ptr = &desc->action;
 	for (;;) {
 		action = *action_ptr;
 
 		if (!action) {
 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
 			raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 			return NULL;
 		}
 
 		if (action->dev_id == dev_id)
 			break;
 		action_ptr = &action->next;
 	}
 
 	/* Found it - now remove it from the list of entries: */
 	*action_ptr = action->next;
 
 	/* Currently used only by UML, might disappear one day: */
 #ifdef CONFIG_IRQ_RELEASE_METHOD
 	if (desc->chip->release)
 		desc->chip->release(irq, dev_id);
 #endif
 
 	/* If this was the last handler, shut down the IRQ line: */
 	if (!desc->action) {
 		desc->status |= IRQ_DISABLED;
 		if (desc->chip->shutdown)
 			desc->chip->shutdown(irq);
 		else
 			desc->chip->disable(irq);
 	}
 
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
 
 	/* Make sure it's not being used on another CPU: */
 	synchronize_irq(irq);
 
 #ifdef CONFIG_DEBUG_SHIRQ
 	/*
 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
 	 * event to happen even now it's being freed, so let's make sure that
 	 * is so by doing an extra call to the handler ....
 	 *
 	 * ( We do this after actually deregistering it, to make sure that a
 	 *   'real' IRQ doesn't run in * parallel with our fake. )
 	 */
 	if (action->flags & IRQF_SHARED) {
 		local_irq_save(flags);
 		action->handler(irq, dev_id);
 		local_irq_restore(flags);
 	}
 #endif
 
 	if (action->thread) {
 		if (!test_bit(IRQTF_DIED, &action->thread_flags))
 			kthread_stop(action->thread);
 		put_task_struct(action->thread);
 	}
 
 	return action;
 }
 
 /**
  * remove_irq - free an interrupt
  * @irq: Interrupt line to free
  * @act: irqaction for the interrupt
  *
  * Used to remove interrupts statically setup by the early boot process.
  */
 void remove_irq(unsigned int irq, struct irqaction *act)
 {
 	__free_irq(irq, act->dev_id);
 }
 EXPORT_SYMBOL_GPL(remove_irq);
 
 /**
  * free_irq - free an interrupt allocated with request_irq
  * @irq: Interrupt line to free
  * @dev_id: Device identity to free
  *
  * Remove an interrupt handler. The handler is removed and if the
  * interrupt line is no longer in use by any driver it is disabled.
  * On a shared IRQ the caller must ensure the interrupt is disabled
  * on the card it drives before calling this function. The function
  * does not return until any executing interrupts for this IRQ
  * have completed.
  *
  * This function must not be called from interrupt context.
  */
 void free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (!desc)
 		return;
 
 	chip_bus_lock(irq, desc);
 	kfree(__free_irq(irq, dev_id));
 	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(free_irq);
 
 /**
  * request_threaded_irq - allocate an interrupt line
  * @irq: Interrupt line to allocate
  * @handler: Function to be called when the IRQ occurs.
  *	     Primary handler for threaded interrupts
  *	     If NULL and thread_fn != NULL the default
  *	     primary handler is installed
  * @thread_fn: Function called from the irq handler thread
  *	       If NULL, no irq thread is created
  * @irqflags: Interrupt type flags
  * @devname: An ascii name for the claiming device
  * @dev_id: A cookie passed back to the handler function
  *
  * This call allocates interrupt resources and enables the
  * interrupt line and IRQ handling. From the point this
  * call is made your handler function may be invoked. Since
  * your handler function must clear any interrupt the board
  * raises, you must take care both to initialise your hardware
  * and to set up the interrupt handler in the right order.
  *
  * If you want to set up a threaded irq handler for your device
  * then you need to supply @handler and @thread_fn. @handler ist
  * still called in hard interrupt context and has to check
  * whether the interrupt originates from the device. If yes it
  * needs to disable the interrupt on the device and return
  * IRQ_WAKE_THREAD which will wake up the handler thread and run
  * @thread_fn. This split handler design is necessary to support
  * shared interrupts.
  *
  * Dev_id must be globally unique. Normally the address of the
  * device data structure is used as the cookie. Since the handler
  * receives this value it makes sense to use it.
  *
  * If your interrupt is shared you must pass a non NULL dev_id
  * as this is required when freeing the interrupt.
  *
  * Flags:
  *
  *	IRQF_SHARED		Interrupt is shared
  *	IRQF_DISABLED		Disable local interrupts while processing
  *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
  *	IRQF_TRIGGER_*		Specify active edge(s) or level
  *
  */
 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 			 irq_handler_t thread_fn, unsigned long irqflags,
 			 const char *devname, void *dev_id)
 {
 	struct irqaction *action;
 	struct irq_desc *desc;
 	int retval;
 
 	/*
 	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
 	 * the _first_ irqaction (sigh).  That can cause oopsing, but
 	 * the behavior is classified as "will not fix" so we need to
 	 * start nudging drivers away from using that idiom.
 	 */
 	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
 					(IRQF_SHARED|IRQF_DISABLED)) {
 		pr_warning(
 		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
 			irq, devname);
 	}
 
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * Lockdep wants atomic interrupt handlers:
 	 */
 	irqflags |= IRQF_DISABLED;
 #endif
 	/*
 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
 	 * otherwise we'll have trouble later trying to figure out
 	 * which interrupt is which (messes up the interrupt freeing
 	 * logic etc).
 	 */
 	if ((irqflags & IRQF_SHARED) && !dev_id)
 		return -EINVAL;
 
 	desc = irq_to_desc(irq);
 	if (!desc)
 		return -EINVAL;
 
 	if (desc->status & IRQ_NOREQUEST)
 		return -EINVAL;
 
 	if (!handler) {
 		if (!thread_fn)
 			return -EINVAL;
 		handler = irq_default_primary_handler;
 	}
 
 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 	if (!action)
 		return -ENOMEM;
 
 	action->handler = handler;
 	action->thread_fn = thread_fn;
 	action->flags = irqflags;
 	action->name = devname;
 	action->dev_id = dev_id;
 
 	chip_bus_lock(irq, desc);
 	retval = __setup_irq(irq, desc, action);
 	chip_bus_sync_unlock(irq, desc);
 
 	if (retval)
 		kfree(action);
 
 #ifdef CONFIG_DEBUG_SHIRQ
 	if (!retval && (irqflags & IRQF_SHARED)) {
 		/*
 		 * It's a shared IRQ -- the driver ought to be prepared for it
 		 * to happen immediately, so let's make sure....
 		 * We disable the irq to make sure that a 'real' IRQ doesn't
 		 * run in parallel with our fake.
 		 */
 		unsigned long flags;
 
 		disable_irq(irq);
 		local_irq_save(flags);
 
 		handler(irq, dev_id);
 
 		local_irq_restore(flags);
 		enable_irq(irq);
 	}
 #endif
 	return retval;
 }
 EXPORT_SYMBOL(request_threaded_irq);
 