kernel/irq/manage.c
  /*
   * linux/kernel/irq/manage.c
   *
   * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   * Copyright (C) 2005-2006 Thomas Gleixner
   *
   * This file contains driver APIs to the irq subsystem.
   */
  
  #include <linux/irq.h>
  #include <linux/kthread.h>
  #include <linux/module.h>
  #include <linux/random.h>
  #include <linux/interrupt.h>
  #include <linux/slab.h>
  #include <linux/sched.h>
  
  #include "internals.h"
  #ifdef CONFIG_IRQ_FORCED_THREADING
  __read_mostly bool force_irqthreads;
  
  static int __init setup_forced_irqthreads(char *arg)
  {
  	force_irqthreads = true;
  	return 0;
  }
  early_param("threadirqs", setup_forced_irqthreads);
  #endif
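
  /*
   * Illustrative usage (not part of the original file): forced threading is
   * selected at boot time by adding the parameter to the kernel command line,
   * e.g.
   *
   *	linux ... threadirqs
   *
   * With it set, handlers that are not marked IRQF_NO_THREAD (and are not
   * IRQF_PERCPU or IRQF_ONESHOT) run from an "irq/<nr>-<name>" kernel thread
   * instead of hard interrupt context; see irq_setup_forced_threading() below.
   */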
  /**
   *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
   *	@irq: interrupt number to wait for
   *
   *	This function waits for any pending IRQ handlers for this interrupt
   *	to complete before returning. If you use this function while
   *	holding a resource the IRQ handler may need, you will deadlock.
   *
   *	This function may be called - with care - from IRQ context.
   */
  void synchronize_irq(unsigned int irq)
  {
  	struct irq_desc *desc = irq_to_desc(irq);
  	bool inprogress;

  	if (!desc)
  		return;
  	do {
  		unsigned long flags;
  
  		/*
  		 * Wait until we're out of the critical section.  This might
  		 * give the wrong answer due to the lack of memory barriers.
  		 */
  		while (irqd_irq_inprogress(&desc->irq_data))
  			cpu_relax();
  
  		/* Ok, that indicated we're done: double-check carefully. */
  		raw_spin_lock_irqsave(&desc->lock, flags);
  		inprogress = irqd_irq_inprogress(&desc->irq_data);
  		raw_spin_unlock_irqrestore(&desc->lock, flags);
  
  		/* Oops, that failed? */
  	} while (inprogress);
  
  	/*
  	 * We made sure that no hardirq handler is running. Now verify
  	 * that no threaded handlers are active.
  	 */
  	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
  }
  EXPORT_SYMBOL(synchronize_irq);
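
  /*
   * Illustrative sketch (not part of the original file): a driver tear-down
   * path typically stops the device from raising the interrupt first, then
   * calls synchronize_irq() before releasing resources a handler might still
   * touch. "my_dev", "my_dev_stop_irq" and "dev->irq" are hypothetical names.
   *
   *	static void my_dev_shutdown(struct my_dev *dev)
   *	{
   *		my_dev_stop_irq(dev);		// hardware no longer asserts the line
   *		synchronize_irq(dev->irq);	// wait for hard and threaded handlers
   *		my_dev_free_buffers(dev);	// now safe to free shared state
   *	}
   */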
  #ifdef CONFIG_SMP
  cpumask_var_t irq_default_affinity;
  /**
   *	irq_can_set_affinity - Check if the affinity of a given irq can be set
   *	@irq:		Interrupt to check
   *
   */
  int irq_can_set_affinity(unsigned int irq)
  {
  	struct irq_desc *desc = irq_to_desc(irq);

  	if (!desc || !irqd_can_balance(&desc->irq_data) ||
  	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
  		return 0;
  
  	return 1;
  }
  /**
   *	irq_set_thread_affinity - Notify irq threads to adjust affinity
   *	@desc:		irq descriptor which has affinity changed
   *
   *	We just set IRQTF_AFFINITY and delegate the affinity setting
   *	to the interrupt thread itself. We can not call
   *	set_cpus_allowed_ptr() here as we hold desc->lock and this
   *	code can be called from hard interrupt context.
   */
  void irq_set_thread_affinity(struct irq_desc *desc)
  {
  	struct irqaction *action = desc->action;
  
  	while (action) {
  		if (action->thread)
  			set_bit(IRQTF_AFFINITY, &action->thread_flags);
  		action = action->next;
  	}
  }
  #ifdef CONFIG_GENERIC_PENDING_IRQ
  static inline bool irq_can_move_pcntxt(struct irq_data *data)
  {
  	return irqd_can_move_in_process_context(data);
  }
  static inline bool irq_move_pending(struct irq_data *data)
  {
  	return irqd_is_setaffinity_pending(data);
  }
  static inline void
  irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
  {
  	cpumask_copy(desc->pending_mask, mask);
  }
  static inline void
  irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
  {
  	cpumask_copy(mask, desc->pending_mask);
  }
  #else
  static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
  static inline bool irq_move_pending(struct irq_data *data) { return false; }
  static inline void
  irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
  static inline void
  irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
  #endif
  int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
  {
  	struct irq_chip *chip = irq_data_get_irq_chip(data);
  	struct irq_desc *desc = irq_data_to_desc(data);
  	int ret = 0;

  	if (!chip || !chip->irq_set_affinity)
  		return -EINVAL;
  	if (irq_can_move_pcntxt(data)) {
  		ret = chip->irq_set_affinity(data, mask, false);
  		switch (ret) {
  		case IRQ_SET_MASK_OK:
  			cpumask_copy(data->affinity, mask);
  		case IRQ_SET_MASK_OK_NOCOPY:
  			irq_set_thread_affinity(desc);
  			ret = 0;
  		}
  	} else {
  		irqd_set_move_pending(data);
  		irq_copy_pending(desc, mask);
  	}

  	if (desc->affinity_notify) {
  		kref_get(&desc->affinity_notify->kref);
  		schedule_work(&desc->affinity_notify->work);
  	}
  	irqd_set(data, IRQD_AFFINITY_SET);
  
  	return ret;
  }
  
  /**
   *	irq_set_affinity - Set the irq affinity of a given irq
   *	@irq:		Interrupt to set affinity
   *	@mask:		cpumask
   *
   */
  int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
  {
  	struct irq_desc *desc = irq_to_desc(irq);
  	unsigned long flags;
  	int ret;
  
  	if (!desc)
  		return -EINVAL;
  
  	raw_spin_lock_irqsave(&desc->lock, flags);
  	ret =  __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
  	raw_spin_unlock_irqrestore(&desc->lock, flags);
  	return ret;
  }
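
  /*
   * Illustrative sketch (hypothetical driver code, not from this file):
   * pinning an interrupt to a single CPU with the helpers above. "my_irq"
   * and the target CPU number are assumptions for the example.
   *
   *	if (irq_can_set_affinity(my_irq))
   *		irq_set_affinity(my_irq, cpumask_of(2));
   */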
  int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
  {
  	unsigned long flags;
  	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  
  	if (!desc)
  		return -EINVAL;
  	desc->affinity_hint = m;
  	irq_put_desc_unlock(desc, flags);
  	return 0;
  }
  EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
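
  /*
   * Illustrative sketch (hypothetical "queue" structure): a multiqueue driver
   * can publish the mask it would like userspace (e.g. irqbalance) to use via
   * /proc/irq/<nr>/affinity_hint, and should clear it again before free_irq().
   *
   *	irq_set_affinity_hint(queue->irq, &queue->affinity_mask);
   *	...
   *	irq_set_affinity_hint(queue->irq, NULL);
   */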
  static void irq_affinity_notify(struct work_struct *work)
  {
  	struct irq_affinity_notify *notify =
  		container_of(work, struct irq_affinity_notify, work);
  	struct irq_desc *desc = irq_to_desc(notify->irq);
  	cpumask_var_t cpumask;
  	unsigned long flags;
  	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
  		goto out;
  
  	raw_spin_lock_irqsave(&desc->lock, flags);
  	if (irq_move_pending(&desc->irq_data))
  		irq_get_pending(cpumask, desc);
  	else
  		cpumask_copy(cpumask, desc->irq_data.affinity);
  	raw_spin_unlock_irqrestore(&desc->lock, flags);
  
  	notify->notify(notify, cpumask);
  
  	free_cpumask_var(cpumask);
  out:
  	kref_put(&notify->kref, notify->release);
  }
  
  /**
   *	irq_set_affinity_notifier - control notification of IRQ affinity changes
   *	@irq:		Interrupt for which to enable/disable notification
   *	@notify:	Context for notification, or %NULL to disable
   *			notification.  Function pointers must be initialised;
   *			the other fields will be initialised by this function.
   *
   *	Must be called in process context.  Notification may only be enabled
   *	after the IRQ is allocated and must be disabled before the IRQ is
   *	freed using free_irq().
   */
  int
  irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
  {
  	struct irq_desc *desc = irq_to_desc(irq);
  	struct irq_affinity_notify *old_notify;
  	unsigned long flags;
  
  	/* The release function is promised process context */
  	might_sleep();
  
  	if (!desc)
  		return -EINVAL;
  
  	/* Complete initialisation of *notify */
  	if (notify) {
  		notify->irq = irq;
  		kref_init(&notify->kref);
  		INIT_WORK(&notify->work, irq_affinity_notify);
  	}
  
  	raw_spin_lock_irqsave(&desc->lock, flags);
  	old_notify = desc->affinity_notify;
  	desc->affinity_notify = notify;
  	raw_spin_unlock_irqrestore(&desc->lock, flags);
  
  	if (old_notify)
  		kref_put(&old_notify->kref, old_notify->release);
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
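
  /*
   * Illustrative sketch (hypothetical "struct my_ctx", not from this file):
   * registering for affinity-change notifications. Only notify() and
   * release() need to be filled in before the call; release() runs from
   * process context when the last reference is dropped.
   *
   *	static void my_affinity_notify(struct irq_affinity_notify *notify,
   *				       const cpumask_t *mask)
   *	{
   *		struct my_ctx *ctx = container_of(notify, struct my_ctx, notify);
   *		// re-steer per-CPU resources to the new mask
   *	}
   *
   *	static void my_affinity_release(struct kref *ref)
   *	{
   *		// nothing to free in this example
   *	}
   *
   *	ctx->notify.notify = my_affinity_notify;
   *	ctx->notify.release = my_affinity_release;
   *	irq_set_affinity_notifier(irq, &ctx->notify);
   */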
  #ifndef CONFIG_AUTO_IRQ_AFFINITY
  /*
   * Generic version of the affinity autoselector.
   */
  static int
  setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
  {
  	struct irq_chip *chip = irq_desc_get_chip(desc);
  	struct cpumask *set = irq_default_affinity;
  	int ret;

  	/* Excludes PER_CPU and NO_BALANCE interrupts */
  	if (!irq_can_set_affinity(irq))
  		return 0;
  	/*
  	 * Preserve a userspace affinity setup, but make sure that
  	 * one of the targets is online.
  	 */
  	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
  		if (cpumask_intersects(desc->irq_data.affinity,
  				       cpu_online_mask))
  			set = desc->irq_data.affinity;
  		else
  			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
  	}

  	cpumask_and(mask, cpu_online_mask, set);
  	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
  	switch (ret) {
  	case IRQ_SET_MASK_OK:
  		cpumask_copy(desc->irq_data.affinity, mask);
  	case IRQ_SET_MASK_OK_NOCOPY:
  		irq_set_thread_affinity(desc);
  	}
  	return 0;
  }
  #else
  static inline int
  setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
  {
  	return irq_select_affinity(irq);
  }
  #endif
  /*
   * Called when affinity is set via /proc/irq
   */
  int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
  {
  	struct irq_desc *desc = irq_to_desc(irq);
  	unsigned long flags;
  	int ret;
  	raw_spin_lock_irqsave(&desc->lock, flags);
  	ret = setup_affinity(irq, desc, mask);
  	raw_spin_unlock_irqrestore(&desc->lock, flags);
  	return ret;
  }
  
  #else
  static inline int
  setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
  {
  	return 0;
  }
  #endif
  void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
  {
  	if (suspend) {
  		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
  			return;
  		desc->istate |= IRQS_SUSPENDED;
  	}
  	if (!desc->depth++)
  		irq_disable(desc);
  }
  static int __disable_irq_nosync(unsigned int irq)
  {
  	unsigned long flags;
  	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  
  	if (!desc)
  		return -EINVAL;
  	__disable_irq(desc, irq, false);
  	irq_put_desc_busunlock(desc, flags);
  	return 0;
  }
  /**
   *	disable_irq_nosync - disable an irq without waiting
   *	@irq: Interrupt to disable
   *
   *	Disable the selected interrupt line.  Disables and Enables are
   *	nested.
   *	Unlike disable_irq(), this function does not ensure existing
   *	instances of the IRQ handler have completed before returning.
   *
   *	This function may be called from IRQ context.
   */
  void disable_irq_nosync(unsigned int irq)
  {
  	__disable_irq_nosync(irq);
  }
  EXPORT_SYMBOL(disable_irq_nosync);
  
  /**
   *	disable_irq - disable an irq and wait for completion
   *	@irq: Interrupt to disable
   *
   *	Disable the selected interrupt line.  Enables and Disables are
   *	nested.
   *	This function waits for any pending IRQ handlers for this interrupt
   *	to complete before returning. If you use this function while
   *	holding a resource the IRQ handler may need, you will deadlock.
   *
   *	This function may be called - with care - from IRQ context.
   */
  void disable_irq(unsigned int irq)
  {
  	if (!__disable_irq_nosync(irq))
  		synchronize_irq(irq);
  }
  EXPORT_SYMBOL(disable_irq);
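
  /*
   * Illustrative sketch (not from this file): disables nest, so every
   * disable_irq()/disable_irq_nosync() must be balanced by exactly one
   * enable_irq(). "my_irq" is a placeholder.
   *
   *	disable_irq(my_irq);	// depth 0 -> 1, line disabled, handlers drained
   *	disable_irq(my_irq);	// depth 1 -> 2
   *	enable_irq(my_irq);	// depth 2 -> 1, still disabled
   *	enable_irq(my_irq);	// depth 1 -> 0, interrupt delivery resumes
   */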
  void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
  {
  	if (resume) {
  		if (!(desc->istate & IRQS_SUSPENDED)) {
  			if (!desc->action)
  				return;
  			if (!(desc->action->flags & IRQF_FORCE_RESUME))
  				return;
  			/* Pretend that it got disabled ! */
  			desc->depth++;
  		}
  		desc->istate &= ~IRQS_SUSPENDED;
  	}

  	switch (desc->depth) {
  	case 0:
   err_out:
  		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
  		break;
  	case 1: {
  		if (desc->istate & IRQS_SUSPENDED)
  			goto err_out;
  		/* Prevent probing on this irq: */
  		irq_settings_set_noprobe(desc);
  		irq_enable(desc);
  		check_irq_resend(desc, irq);
  		/* fall-through */
  	}
  	default:
  		desc->depth--;
  	}
  }
  /**
   *	enable_irq - enable handling of an irq
   *	@irq: Interrupt to enable
   *
   *	Undoes the effect of one call to disable_irq().  If this
   *	matches the last disable, processing of interrupts on this
   *	IRQ line is re-enabled.
   *
   *	This function may be called from IRQ context only when
   *	desc->irq_data.chip->irq_bus_lock and desc->irq_data.chip->irq_bus_sync_unlock are NULL !
   */
  void enable_irq(unsigned int irq)
  {
  	unsigned long flags;
  	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

  	if (!desc)
  		return;
  	if (WARN(!desc->irq_data.chip,
  		 KERN_ERR "enable_irq before setup/request_irq: irq %u
  ", irq))
  		goto out;

  	__enable_irq(desc, irq, false);
  out:
  	irq_put_desc_busunlock(desc, flags);
  }
  EXPORT_SYMBOL(enable_irq);
  static int set_irq_wake_real(unsigned int irq, unsigned int on)
  {
  	struct irq_desc *desc = irq_to_desc(irq);
  	int ret = -ENXIO;
  	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
  		return 0;
  	if (desc->irq_data.chip->irq_set_wake)
  		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
  
  	return ret;
  }
  /**
   *	irq_set_irq_wake - control irq power management wakeup
   *	@irq:	interrupt to control
   *	@on:	enable/disable power management wakeup
   *
   *	Enable/disable power management wakeup mode, which is
   *	disabled by default.  Enables and disables must match,
   *	just as they match for non-wakeup mode support.
   *
   *	Wakeup mode lets this IRQ wake the system from sleep
   *	states like "suspend to RAM".
   */
  int irq_set_irq_wake(unsigned int irq, unsigned int on)
  {
  	unsigned long flags;
  	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  	int ret = 0;

  	if (!desc)
  		return -EINVAL;
  	/* wakeup-capable irqs can be shared between drivers that
  	 * don't need to have the same sleep mode behaviors.
  	 */
  	if (on) {
  		if (desc->wake_depth++ == 0) {
  			ret = set_irq_wake_real(irq, on);
  			if (ret)
  				desc->wake_depth = 0;
  			else
  				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
  		}
  	} else {
  		if (desc->wake_depth == 0) {
  			WARN(1, "Unbalanced IRQ %d wake disable
  ", irq);
  		} else if (--desc->wake_depth == 0) {
  			ret = set_irq_wake_real(irq, on);
  			if (ret)
  				desc->wake_depth = 1;
  			else
  				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
  		}
  	}
  	irq_put_desc_busunlock(desc, flags);
  	return ret;
  }
  EXPORT_SYMBOL(irq_set_irq_wake);
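
  /*
   * Illustrative sketch (hypothetical suspend/resume hooks, not from this
   * file): marking an interrupt as a wakeup source only while the system is
   * suspended. "my_irq" is a placeholder irq number.
   *
   *	static int my_dev_suspend(struct device *dev)
   *	{
   *		if (device_may_wakeup(dev))
   *			irq_set_irq_wake(my_irq, 1);
   *		return 0;
   *	}
   *
   *	static int my_dev_resume(struct device *dev)
   *	{
   *		if (device_may_wakeup(dev))
   *			irq_set_irq_wake(my_irq, 0);
   *		return 0;
   *	}
   */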

  /*
   * Internal function that tells the architecture code whether a
   * particular irq has been exclusively allocated or is available
   * for driver use.
   */
  int can_request_irq(unsigned int irq, unsigned long irqflags)
  {
  	unsigned long flags;
  	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  	int canrequest = 0;

  	if (!desc)
  		return 0;
  	if (irq_settings_can_request(desc)) {
  		if (desc->action)
  			if (irqflags & desc->action->flags & IRQF_SHARED)
  				canrequest = 1;
  	}
  	irq_put_desc_unlock(desc, flags);
  	return canrequest;
  }
  int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
  		      unsigned long flags)
  {
  	struct irq_chip *chip = desc->irq_data.chip;
  	int ret, unmask = 0;

  	if (!chip || !chip->irq_set_type) {
  		/*
  		 * IRQF_TRIGGER_* but the PIC does not support multiple
  		 * flow-types?
  		 */
  		pr_debug("No set_type function for IRQ %d (%s)
  ", irq,
  				chip ? (chip->name ? : "unknown") : "unknown");
  		return 0;
  	}
  	flags &= IRQ_TYPE_SENSE_MASK;
  
  	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
  		if (!irqd_irq_masked(&desc->irq_data))
  			mask_irq(desc);
  		if (!irqd_irq_disabled(&desc->irq_data))
  			unmask = 1;
  	}
  	/* caller masked out all except trigger mode flags */
  	ret = chip->irq_set_type(&desc->irq_data, flags);

  	switch (ret) {
  	case IRQ_SET_MASK_OK:
  		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
  		irqd_set(&desc->irq_data, flags);
  
  	case IRQ_SET_MASK_OK_NOCOPY:
  		flags = irqd_get_trigger_type(&desc->irq_data);
  		irq_settings_set_trigger_mask(desc, flags);
  		irqd_clear(&desc->irq_data, IRQD_LEVEL);
  		irq_settings_clr_level(desc);
  		if (flags & IRQ_TYPE_LEVEL_MASK) {
  			irq_settings_set_level(desc);
  			irqd_set(&desc->irq_data, IRQD_LEVEL);
  		}

  		ret = 0;
  		break;
  	default:
  		pr_err("setting trigger mode %lu for irq %u failed (%pF)
  ",
  		       flags, irq, chip->irq_set_type);
  	}
  	if (unmask)
  		unmask_irq(desc);
  	return ret;
  }
  /*
   * Default primary interrupt handler for threaded interrupts. Is
   * assigned as primary handler when request_threaded_irq is called
   * with handler == NULL. Useful for oneshot interrupts.
   */
  static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
  {
  	return IRQ_WAKE_THREAD;
  }
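
  /*
   * Illustrative sketch (not from this file): a driver that only needs a
   * threaded handler passes handler == NULL together with IRQF_ONESHOT, so
   * the default primary handler above is installed and the line stays masked
   * until the thread has run. "my_dev_thread_fn" and "dev" are hypothetical.
   *
   *	ret = request_threaded_irq(irq, NULL, my_dev_thread_fn,
   *				   IRQF_ONESHOT, "my_dev", dev);
   */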
  /*
   * Primary handler for nested threaded interrupts. Should never be
   * called.
   */
  static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
  {
  	WARN(1, "Primary handler called for nested irq %d
  ", irq);
  	return IRQ_NONE;
  }
  static int irq_wait_for_interrupt(struct irqaction *action)
  {
  	set_current_state(TASK_INTERRUPTIBLE);
  	while (!kthread_should_stop()) {
  
  		if (test_and_clear_bit(IRQTF_RUNTHREAD,
  				       &action->thread_flags)) {
  			__set_current_state(TASK_RUNNING);
  			return 0;
  		}
  		schedule();
  		set_current_state(TASK_INTERRUPTIBLE);
  	}
  	__set_current_state(TASK_RUNNING);
  	return -1;
  }
  /*
   * Oneshot interrupts keep the irq line masked until the threaded
   * handler finished. unmask if the interrupt has not been disabled and
   * is marked MASKED.
   */
  static void irq_finalize_oneshot(struct irq_desc *desc,
  				 struct irqaction *action, bool force)
  {
  	if (!(desc->istate & IRQS_ONESHOT))
  		return;
  again:
  	chip_bus_lock(desc);
  	raw_spin_lock_irq(&desc->lock);
  
  	/*
  	 * Implausible though it may be we need to protect us against
  	 * the following scenario:
  	 *
  	 * The thread is faster done than the hard interrupt handler
  	 * on the other CPU. If we unmask the irq line then the
  	 * interrupt can come in again and masks the line, leaves due
  	 * to IRQS_INPROGRESS and the irq line is masked forever.
  	 *
  	 * This also serializes the state of shared oneshot handlers
  	 * versus "desc->threads_onehsot |= action->thread_mask;" in
  	 * irq_wake_thread(). See the comment there which explains the
  	 * serialization.
  	 */
  	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
  		raw_spin_unlock_irq(&desc->lock);
  		chip_bus_sync_unlock(desc);
  		cpu_relax();
  		goto again;
  	}
  	/*
  	 * Now check again, whether the thread should run. Otherwise
  	 * we would clear the threads_oneshot bit of this thread which
  	 * was just set.
  	 */
  	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
  		goto out_unlock;
  
  	desc->threads_oneshot &= ~action->thread_mask;
  	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
  	    irqd_irq_masked(&desc->irq_data))
  		unmask_irq(desc);
  out_unlock:
  	raw_spin_unlock_irq(&desc->lock);
  	chip_bus_sync_unlock(desc);
  }
  #ifdef CONFIG_SMP
  /*
   * Check whether we need to change the affinity of the interrupt thread.
   */
  static void
  irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
  {
  	cpumask_var_t mask;
  
  	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
  		return;
  
  	/*
  	 * In case we are out of memory we set IRQTF_AFFINITY again and
  	 * try again next time
  	 */
  	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
  		set_bit(IRQTF_AFFINITY, &action->thread_flags);
  		return;
  	}
  	raw_spin_lock_irq(&desc->lock);
  	cpumask_copy(mask, desc->irq_data.affinity);
  	raw_spin_unlock_irq(&desc->lock);
  
  	set_cpus_allowed_ptr(current, mask);
  	free_cpumask_var(mask);
  }
  #else
  static inline void
  irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  #endif
  
  /*
   * Interrupts which are not explicitly requested as threaded
   * interrupts rely on the implicit bh/preempt disable of the hard irq
   * context. So we need to disable bh here to avoid deadlocks and other
   * side effects.
   */
  static irqreturn_t
  irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
  {
  	irqreturn_t ret;
  	local_bh_disable();
  	ret = action->thread_fn(action->irq, action->dev_id);
  	irq_finalize_oneshot(desc, action, false);
  	local_bh_enable();
  	return ret;
  }
  
  /*
   * Interrupts explicitly requested as threaded interrupts want to be
   * preemptible - many of them need to sleep and wait for slow busses to
   * complete.
   */
  static irqreturn_t irq_thread_fn(struct irq_desc *desc,
  		struct irqaction *action)
  {
  	irqreturn_t ret;
  
  	ret = action->thread_fn(action->irq, action->dev_id);
  	irq_finalize_oneshot(desc, action, false);
  	return ret;
  }
  
  /*
   * Interrupt handler thread
   */
  static int irq_thread(void *data)
  {
  	static const struct sched_param param = {
  		.sched_priority = MAX_USER_RT_PRIO/2,
  	};
  	struct irqaction *action = data;
  	struct irq_desc *desc = irq_to_desc(action->irq);
  	irqreturn_t (*handler_fn)(struct irq_desc *desc,
  			struct irqaction *action);
  	int wake;

  	if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
  					&action->thread_flags))
  		handler_fn = irq_forced_thread_fn;
  	else
  		handler_fn = irq_thread_fn;
  	sched_setscheduler(current, SCHED_FIFO, &param);
  	current->irqaction = action;
  
  	while (!irq_wait_for_interrupt(action)) {
  		irq_thread_check_affinity(desc, action);
  		atomic_inc(&desc->threads_active);
  		raw_spin_lock_irq(&desc->lock);
  		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
  			/*
  			 * CHECKME: We might need a dedicated
  			 * IRQ_THREAD_PENDING flag here, which
  			 * retriggers the thread in check_irq_resend()
  			 * but AFAICT IRQS_PENDING should be fine as it
  			 * retriggers the interrupt itself --- tglx
  			 */
  			desc->istate |= IRQS_PENDING;
  			raw_spin_unlock_irq(&desc->lock);
  		} else {
  			irqreturn_t action_ret;
  			raw_spin_unlock_irq(&desc->lock);
  			action_ret = handler_fn(desc, action);
  			if (!noirqdebug)
  				note_interrupt(action->irq, desc, action_ret);
  		}
  
  		wake = atomic_dec_and_test(&desc->threads_active);
  
  		if (wake && waitqueue_active(&desc->wait_for_threads))
  			wake_up(&desc->wait_for_threads);
  	}
  	/* Prevent a stale desc->threads_oneshot */
  	irq_finalize_oneshot(desc, action, true);
  	/*
  	 * Clear irqaction. Otherwise exit_irq_thread() would make
  	 * fuzz about an active irq thread going into nirvana.
  	 */
  	current->irqaction = NULL;
  	return 0;
  }
  
  /*
   * Called from do_exit()
   */
  void exit_irq_thread(void)
  {
  	struct task_struct *tsk = current;
  	struct irq_desc *desc;
  
  	if (!tsk->irqaction)
  		return;
  
  	printk(KERN_ERR
  	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)
  ",
  	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
  	desc = irq_to_desc(tsk->irqaction->irq);
  
  	/*
  	 * Prevent a stale desc->threads_oneshot. Must be called
  	 * before setting the IRQTF_DIED flag.
  	 */
  	irq_finalize_oneshot(desc, tsk->irqaction, true);
  	/*
  	 * Set the THREAD DIED flag to prevent further wakeups of the
  	 * soon to be gone threaded handler.
  	 */
  	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
  }
  static void irq_setup_forced_threading(struct irqaction *new)
  {
  	if (!force_irqthreads)
  		return;
  	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
  		return;
  
  	new->flags |= IRQF_ONESHOT;
  
  	if (!new->thread_fn) {
  		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
  		new->thread_fn = new->handler;
  		new->handler = irq_default_primary_handler;
  	}
  }
  /*
   * Internal function to register an irqaction - typically used to
   * allocate special interrupts that are part of the architecture.
   */
  static int
  __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
  {
  	struct irqaction *old, **old_ptr;
  	const char *old_name = NULL;
  	unsigned long flags, thread_mask = 0;
  	int ret, nested, shared = 0;
  	cpumask_var_t mask;

  	if (!desc)
  		return -EINVAL;
  	if (desc->irq_data.chip == &no_irq_chip)
  		return -ENOSYS;
  	if (!try_module_get(desc->owner))
  		return -ENODEV;
  	/*
  	 * Some drivers like serial.c use request_irq() heavily,
  	 * so we have to be careful not to interfere with a
  	 * running system.
  	 */
  	if (new->flags & IRQF_SAMPLE_RANDOM) {
  		/*
  		 * This function might sleep, we want to call it first,
  		 * outside of the atomic block.
  		 * Yes, this might clear the entropy pool if the wrong
  		 * driver is attempted to be loaded, without actually
  		 * installing a new handler, but is this really a problem,
  		 * only the sysadmin is able to do this.
  		 */
  		rand_initialize_irq(irq);
  	}
  
  	/*
  	 * Check whether the interrupt nests into another interrupt
  	 * thread.
  	 */
  	nested = irq_settings_is_nested_thread(desc);
  	if (nested) {
  		if (!new->thread_fn) {
  			ret = -EINVAL;
  			goto out_mput;
  		}
  		/*
  		 * Replace the primary handler which was provided from
  		 * the driver for non nested interrupt handling by the
  		 * dummy function which warns when called.
  		 */
  		new->handler = irq_nested_primary_handler;
  	} else {
  		if (irq_settings_can_thread(desc))
  			irq_setup_forced_threading(new);
  	}
  	/*
  	 * Create a handler thread when a thread function is supplied
  	 * and the interrupt does not nest into another interrupt
  	 * thread.
  	 */
  	if (new->thread_fn && !nested) {
  		struct task_struct *t;
  
  		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
  				   new->name);
  		if (IS_ERR(t)) {
  			ret = PTR_ERR(t);
  			goto out_mput;
  		}
  		/*
  		 * We keep the reference to the task struct even if
  		 * the thread dies to avoid that the interrupt code
  		 * references an already freed task_struct.
  		 */
  		get_task_struct(t);
  		new->thread = t;
  	}
  	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
  		ret = -ENOMEM;
  		goto out_thread;
  	}
  	/*
  	 * The following block of code has to be executed atomically
  	 */
  	raw_spin_lock_irqsave(&desc->lock, flags);
  	old_ptr = &desc->action;
  	old = *old_ptr;
  	if (old) {
  		/*
  		 * Can't share interrupts unless both agree to and are
  		 * the same type (level, edge, polarity). So both flag
  		 * fields must have IRQF_SHARED set and the bits which
  		 * set the trigger type must match. Also all must
  		 * agree on ONESHOT.
  		 */
  		if (!((old->flags & new->flags) & IRQF_SHARED) ||
  		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
  		    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
  			old_name = old->name;
  			goto mismatch;
  		}

  		/* All handlers must agree on per-cpuness */
  		if ((old->flags & IRQF_PERCPU) !=
  		    (new->flags & IRQF_PERCPU))
  			goto mismatch;
  
  		/* add new interrupt at end of irq queue */
  		do {
  			thread_mask |= old->thread_mask;
  			old_ptr = &old->next;
  			old = *old_ptr;
  		} while (old);
  		shared = 1;
  	}
  	/*
  	 * Setup the thread mask for this irqaction. Unlikely to have
  	 * 32 resp 64 irqs sharing one line, but who knows.
  	 */
  	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
  		ret = -EBUSY;
  		goto out_mask;
  	}
  	new->thread_mask = 1 << ffz(thread_mask);
  	if (!shared) {
  		init_waitqueue_head(&desc->wait_for_threads);
  		/* Setup the type (level, edge polarity) if configured: */
  		if (new->flags & IRQF_TRIGGER_MASK) {
  			ret = __irq_set_trigger(desc, irq,
  					new->flags & IRQF_TRIGGER_MASK);

  			if (ret)
  				goto out_mask;
  		}

  		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
  				  IRQS_ONESHOT | IRQS_WAITING);
  		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

  		if (new->flags & IRQF_PERCPU) {
  			irqd_set(&desc->irq_data, IRQD_PER_CPU);
  			irq_settings_set_per_cpu(desc);
  		}

  		if (new->flags & IRQF_ONESHOT)
  			desc->istate |= IRQS_ONESHOT;

  		if (irq_settings_can_autoenable(desc))
  			irq_startup(desc);
  		else
  			/* Undo nested disables: */
  			desc->depth = 1;

  		/* Exclude IRQ from balancing if requested */
  		if (new->flags & IRQF_NOBALANCING) {
  			irq_settings_set_no_balancing(desc);
  			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
  		}

  		/* Set default affinity mask once everything is setup */
  		setup_affinity(irq, desc, mask);

  	} else if (new->flags & IRQF_TRIGGER_MASK) {
  		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
  		unsigned int omsk = irq_settings_get_trigger_mask(desc);
  
  		if (nmsk != omsk)
  			/* hope the handler works with current  trigger mode */
  			pr_warning("IRQ %d uses trigger mode %u; requested %u
  ",
  				   irq, nmsk, omsk);
  	}

  	new->irq = irq;
  	*old_ptr = new;

  	/* Reset broken irq detection when installing new handler */
  	desc->irq_count = 0;
  	desc->irqs_unhandled = 0;
  
  	/*
  	 * Check whether we disabled the irq via the spurious handler
  	 * before. Reenable it and give it another chance.
  	 */
  	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
  		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
  		__enable_irq(desc, irq, false);
  	}
  	raw_spin_unlock_irqrestore(&desc->lock, flags);

  	/*
  	 * Strictly no need to wake it up, but hung_task complains
  	 * when no hard interrupt wakes the thread up.
  	 */
  	if (new->thread)
  		wake_up_process(new->thread);
  	register_irq_proc(irq, desc);
  	new->dir = NULL;
  	register_handler_proc(irq, new);
  	free_cpumask_var(mask);
  
  	return 0;
  
  mismatch:
  #ifdef CONFIG_DEBUG_SHIRQ
  	if (!(new->flags & IRQF_PROBE_SHARED)) {
  		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d
  ", irq);
  		if (old_name)
  			printk(KERN_ERR "current handler: %s
  ", old_name);
  		dump_stack();
  	}
  #endif
  	ret = -EBUSY;
  out_mask:
  	raw_spin_unlock_irqrestore(&desc->lock, flags);
  	free_cpumask_var(mask);
  out_thread:
  	if (new->thread) {
  		struct task_struct *t = new->thread;
  
  		new->thread = NULL;
  		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
  			kthread_stop(t);
  		put_task_struct(t);
  	}
b6873807a   Sebastian Andrzej Siewior   irq: Track the ow...
1056
1057
  out_mput:
  	module_put(desc->owner);
3aa551c9b   Thomas Gleixner   genirq: add threa...
1058
  	return ret;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1059
1060
1061
  }
  
  /**
d3c60047b   Thomas Gleixner   genirq: cleanup t...
1062
1063
1064
1065
1066
1067
1068
1069
   *	setup_irq - setup an interrupt
   *	@irq: Interrupt line to setup
   *	@act: irqaction for the interrupt
   *
   * Used to statically setup interrupts in the early boot process.
   */
  int setup_irq(unsigned int irq, struct irqaction *act)
  {
986c011dd   David Daney   genirq: Call bus_...
1070
  	int retval;
d3c60047b   Thomas Gleixner   genirq: cleanup t...
1071
  	struct irq_desc *desc = irq_to_desc(irq);
31d9d9b6d   Marc Zyngier   genirq: Add suppo...
1072
1073
  	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
  		return -EINVAL;
986c011dd   David Daney   genirq: Call bus_...
1074
1075
1076
1077
1078
  	chip_bus_lock(desc);
  	retval = __setup_irq(irq, desc, act);
  	chip_bus_sync_unlock(desc);
  
  	return retval;
d3c60047b   Thomas Gleixner   genirq: cleanup t...
1079
  }
eb53b4e8f   Magnus Damm   irq: export remov...
1080
  EXPORT_SYMBOL_GPL(setup_irq);
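
Illustrative sketch, not part of the file above: how early boot code might pair setup_irq() (and later remove_irq()) with a statically allocated struct irqaction, as the kerneldoc describes. The handler, the IRQF_TIMER flag and the IRQ number are assumptions invented for the example.

/* Example only: hypothetical early-boot timer tick wired up via setup_irq(). */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	/* Acknowledge the tick in the (assumed) timer hardware here. */
	return IRQ_HANDLED;
}

static struct irqaction example_timer_irqaction = {
	.handler = example_timer_interrupt,
	.flags	 = IRQF_TIMER,
	.name	 = "example-timer",
};

void __init example_time_init(void)
{
	/* The IRQ number is platform specific; 16 is just a placeholder. */
	setup_irq(16, &example_timer_irqaction);
}
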
d3c60047b   Thomas Gleixner   genirq: cleanup t...
1081

31d9d9b6d   Marc Zyngier   genirq: Add suppo...
1082
  /*
cbf94f068   Magnus Damm   irq: match remove...
1083
1084
   * Internal function to unregister an irqaction - used to free
   * regular and special interrupts that are part of the architecture.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1085
   */
cbf94f068   Magnus Damm   irq: match remove...
1086
  static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1087
  {
d3c60047b   Thomas Gleixner   genirq: cleanup t...
1088
  	struct irq_desc *desc = irq_to_desc(irq);
f17c75453   Ingo Molnar   irq: name 'p' var...
1089
  	struct irqaction *action, **action_ptr;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1090
  	unsigned long flags;
ae88a23b3   Ingo Molnar   irq: refactor and...
1091
1092
  	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
7d94f7ca4   Yinghai Lu   irq: remove >= nr...
1093

7d94f7ca4   Yinghai Lu   irq: remove >= nr...
1094
  	if (!desc)
f21cfb258   Magnus Damm   irq: add remove_i...
1095
  		return NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1096

239007b84   Thomas Gleixner   genirq: Convert i...
1097
  	raw_spin_lock_irqsave(&desc->lock, flags);
ae88a23b3   Ingo Molnar   irq: refactor and...
1098
1099
1100
1101
1102
  
  	/*
  	 * There can be multiple actions per IRQ descriptor, find the right
  	 * one based on the dev_id:
  	 */
f17c75453   Ingo Molnar   irq: name 'p' var...
1103
  	action_ptr = &desc->action;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1104
  	for (;;) {
f17c75453   Ingo Molnar   irq: name 'p' var...
1105
  		action = *action_ptr;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1106

ae88a23b3   Ingo Molnar   irq: refactor and...
1107
1108
1109
  		if (!action) {
  			WARN(1, "Trying to free already-free IRQ %d
  ", irq);
239007b84   Thomas Gleixner   genirq: Convert i...
1110
  			raw_spin_unlock_irqrestore(&desc->lock, flags);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1111

f21cfb258   Magnus Damm   irq: add remove_i...
1112
  			return NULL;
ae88a23b3   Ingo Molnar   irq: refactor and...
1113
  		}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1114

8316e3810   Ingo Molnar   irq: further clea...
1115
1116
  		if (action->dev_id == dev_id)
  			break;
f17c75453   Ingo Molnar   irq: name 'p' var...
1117
  		action_ptr = &action->next;
ae88a23b3   Ingo Molnar   irq: refactor and...
1118
  	}
dbce706e2   Paolo 'Blaisorblade' Giarrusso   [PATCH] uml: add ...
1119

ae88a23b3   Ingo Molnar   irq: refactor and...
1120
  	/* Found it - now remove it from the list of entries: */
f17c75453   Ingo Molnar   irq: name 'p' var...
1121
  	*action_ptr = action->next;
ae88a23b3   Ingo Molnar   irq: refactor and...
1122
1123
  
  	/* Currently used only by UML, might disappear one day: */
b77d6adc9   Paolo 'Blaisorblade' Giarrusso   [PATCH] uml: make...
1124
  #ifdef CONFIG_IRQ_RELEASE_METHOD
6b8ff3120   Thomas Gleixner   genirq: Convert c...
1125
1126
  	if (desc->irq_data.chip->release)
  		desc->irq_data.chip->release(irq, dev_id);
b77d6adc9   Paolo 'Blaisorblade' Giarrusso   [PATCH] uml: make...
1127
  #endif
dbce706e2   Paolo 'Blaisorblade' Giarrusso   [PATCH] uml: add ...
1128

ae88a23b3   Ingo Molnar   irq: refactor and...
1129
  	/* If this was the last handler, shut down the IRQ line: */
469992386   Thomas Gleixner   genirq: Consolida...
1130
1131
  	if (!desc->action)
  		irq_shutdown(desc);
3aa551c9b   Thomas Gleixner   genirq: add threa...
1132

e7a297b0d   Peter P Waskiewicz Jr   genirq: Add CPU m...
1133
1134
1135
1136
1137
  #ifdef CONFIG_SMP
  	/* make sure affinity_hint is cleaned up */
  	if (WARN_ON_ONCE(desc->affinity_hint))
  		desc->affinity_hint = NULL;
  #endif
239007b84   Thomas Gleixner   genirq: Convert i...
1138
  	raw_spin_unlock_irqrestore(&desc->lock, flags);
ae88a23b3   Ingo Molnar   irq: refactor and...
1139
1140
1141
1142
1143
  
  	unregister_handler_proc(irq, action);
  
  	/* Make sure it's not being used on another CPU: */
  	synchronize_irq(irq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1144

70edcd77a   Ingo Molnar   genirq: stackdump...
1145
  #ifdef CONFIG_DEBUG_SHIRQ
ae88a23b3   Ingo Molnar   irq: refactor and...
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
  	/*
  	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
  	 * event to happen even now it's being freed, so let's make sure that
  	 * is so by doing an extra call to the handler ....
  	 *
  	 * ( We do this after actually deregistering it, to make sure that a
  	 *   'real' IRQ doesn't run in parallel with our fake. )
  	 */
  	if (action->flags & IRQF_SHARED) {
  		local_irq_save(flags);
  		action->handler(irq, dev_id);
  		local_irq_restore(flags);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1158
  	}
ae88a23b3   Ingo Molnar   irq: refactor and...
1159
  #endif
2d860ad76   Linus Torvalds   genirq: prevent w...
1160
1161
1162
1163
1164
1165
  
  	if (action->thread) {
  		if (!test_bit(IRQTF_DIED, &action->thread_flags))
  			kthread_stop(action->thread);
  		put_task_struct(action->thread);
  	}
b6873807a   Sebastian Andrzej Siewior   irq: Track the ow...
1166
  	module_put(desc->owner);
f21cfb258   Magnus Damm   irq: add remove_i...
1167
1168
1169
1170
  	return action;
  }
  
  /**
cbf94f068   Magnus Damm   irq: match remove...
1171
1172
1173
1174
1175
1176
1177
1178
   *	remove_irq - free an interrupt
   *	@irq: Interrupt line to free
   *	@act: irqaction for the interrupt
   *
   * Used to remove interrupts statically setup by the early boot process.
   */
  void remove_irq(unsigned int irq, struct irqaction *act)
  {
31d9d9b6d   Marc Zyngier   genirq: Add suppo...
1179
1180
1181
1182
  	struct irq_desc *desc = irq_to_desc(irq);
  
  	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
  	    __free_irq(irq, act->dev_id);
cbf94f068   Magnus Damm   irq: match remove...
1183
  }
eb53b4e8f   Magnus Damm   irq: export remov...
1184
  EXPORT_SYMBOL_GPL(remove_irq);
cbf94f068   Magnus Damm   irq: match remove...
1185
1186
  
  /**
f21cfb258   Magnus Damm   irq: add remove_i...
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
   *	free_irq - free an interrupt allocated with request_irq
   *	@irq: Interrupt line to free
   *	@dev_id: Device identity to free
   *
   *	Remove an interrupt handler. The handler is removed and if the
   *	interrupt line is no longer in use by any driver it is disabled.
   *	On a shared IRQ the caller must ensure the interrupt is disabled
   *	on the card it drives before calling this function. The function
   *	does not return until any executing interrupts for this IRQ
   *	have completed.
   *
   *	This function must not be called from interrupt context.
   */
  void free_irq(unsigned int irq, void *dev_id)
  {
70aedd24d   Thomas Gleixner   genirq: Add buslo...
1202
  	struct irq_desc *desc = irq_to_desc(irq);
31d9d9b6d   Marc Zyngier   genirq: Add suppo...
1203
  	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
70aedd24d   Thomas Gleixner   genirq: Add buslo...
1204
  		return;
cd7eab44e   Ben Hutchings   genirq: Add IRQ a...
1205
1206
1207
1208
  #ifdef CONFIG_SMP
  	if (WARN_ON(desc->affinity_notify))
  		desc->affinity_notify = NULL;
  #endif
3876ec9ef   Thomas Gleixner   genirq: Provide c...
1209
  	chip_bus_lock(desc);
cbf94f068   Magnus Damm   irq: match remove...
1210
  	kfree(__free_irq(irq, dev_id));
3876ec9ef   Thomas Gleixner   genirq: Provide c...
1211
  	chip_bus_sync_unlock(desc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1212
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1213
1214
1215
  EXPORT_SYMBOL(free_irq);
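
Illustrative sketch, not part of the file above: the teardown order the free_irq() kerneldoc requires on a shared line, namely quiesce the device first, then free the IRQ from process context. The device structure and register offset are invented for the example.

/* Example only: hypothetical shutdown path honouring the free_irq() rules. */
#include <linux/interrupt.h>
#include <linux/io.h>

#define EXAMPLE_IRQ_ENABLE	0x08	/* assumed register offset */

struct example_shared_dev {
	void __iomem *regs;
	int irq;
};

static void example_shutdown(struct example_shared_dev *ed)
{
	/* Stop the device raising interrupts before releasing the shared line. */
	writel(0, ed->regs + EXAMPLE_IRQ_ENABLE);

	/* Sleeps until running handlers finish, so process context only. */
	free_irq(ed->irq, ed);
}
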
  
  /**
3aa551c9b   Thomas Gleixner   genirq: add threa...
1216
   *	request_threaded_irq - allocate an interrupt line
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1217
   *	@irq: Interrupt line to allocate
3aa551c9b   Thomas Gleixner   genirq: add threa...
1218
1219
   *	@handler: Function to be called when the IRQ occurs.
   *		  Primary handler for threaded interrupts
b25c340c1   Thomas Gleixner   genirq: Add onesh...
1220
1221
   *		  If NULL and thread_fn != NULL the default
   *		  primary handler is installed
f48fe81e5   Thomas Gleixner   genirq: threaded ...
1222
1223
   *	@thread_fn: Function called from the irq handler thread
   *		    If NULL, no irq thread is created
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
   *	@irqflags: Interrupt type flags
   *	@devname: An ascii name for the claiming device
   *	@dev_id: A cookie passed back to the handler function
   *
   *	This call allocates interrupt resources and enables the
   *	interrupt line and IRQ handling. From the point this
   *	call is made your handler function may be invoked. Since
   *	your handler function must clear any interrupt the board
   *	raises, you must take care both to initialise your hardware
   *	and to set up the interrupt handler in the right order.
   *
3aa551c9b   Thomas Gleixner   genirq: add threa...
1235
   *	If you want to set up a threaded irq handler for your device
6d21af4f7   Javi Merino   irq: Fix comment ...
1236
   *	then you need to supply @handler and @thread_fn. @handler is
3aa551c9b   Thomas Gleixner   genirq: add threa...
1237
1238
1239
   *	still called in hard interrupt context and has to check
   *	whether the interrupt originates from the device. If yes it
   *	needs to disable the interrupt on the device and return
39a2eddb9   Steven Rostedt   genirq: fix comme...
1240
   *	IRQ_WAKE_THREAD which will wake up the handler thread and run
3aa551c9b   Thomas Gleixner   genirq: add threa...
1241
1242
1243
   *	@thread_fn. This split handler design is necessary to support
   *	shared interrupts.
   *
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1244
1245
1246
1247
1248
1249
1250
1251
1252
   *	Dev_id must be globally unique. Normally the address of the
   *	device data structure is used as the cookie. Since the handler
   *	receives this value it makes sense to use it.
   *
   *	If your interrupt is shared you must pass a non NULL dev_id
   *	as this is required when freeing the interrupt.
   *
   *	Flags:
   *
3cca53b02   Thomas Gleixner   [PATCH] irq-flags...
1253
   *	IRQF_SHARED		Interrupt is shared
3cca53b02   Thomas Gleixner   [PATCH] irq-flags...
1254
   *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
0c5d1eb77   David Brownell   genirq: record tr...
1255
   *	IRQF_TRIGGER_*		Specify active edge(s) or level
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1256
1257
   *
   */
3aa551c9b   Thomas Gleixner   genirq: add threa...
1258
1259
1260
  int request_threaded_irq(unsigned int irq, irq_handler_t handler,
  			 irq_handler_t thread_fn, unsigned long irqflags,
  			 const char *devname, void *dev_id)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1261
  {
06fcb0c6f   Ingo Molnar   [PATCH] genirq: c...
1262
  	struct irqaction *action;
08678b084   Yinghai Lu   generic: sparse i...
1263
  	struct irq_desc *desc;
d3c60047b   Thomas Gleixner   genirq: cleanup t...
1264
  	int retval;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1265

470c66239   David Brownell   genirq: warn when...
1266
  	/*
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1267
1268
1269
1270
1271
  	 * Sanity-check: shared interrupts must pass in a real dev-ID,
  	 * otherwise we'll have trouble later trying to figure out
  	 * which interrupt is which (messes up the interrupt freeing
  	 * logic etc).
  	 */
3cca53b02   Thomas Gleixner   [PATCH] irq-flags...
1272
  	if ((irqflags & IRQF_SHARED) && !dev_id)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1273
  		return -EINVAL;
7d94f7ca4   Yinghai Lu   irq: remove >= nr...
1274

cb5bc8322   Yinghai Lu   x86_64: rename ir...
1275
  	desc = irq_to_desc(irq);
7d94f7ca4   Yinghai Lu   irq: remove >= nr...
1276
  	if (!desc)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1277
  		return -EINVAL;
7d94f7ca4   Yinghai Lu   irq: remove >= nr...
1278

31d9d9b6d   Marc Zyngier   genirq: Add suppo...
1279
1280
  	if (!irq_settings_can_request(desc) ||
  	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
6550c775c   Thomas Gleixner   [PATCH] genirq: a...
1281
  		return -EINVAL;
b25c340c1   Thomas Gleixner   genirq: Add onesh...
1282
1283
1284
1285
1286
1287
  
  	if (!handler) {
  		if (!thread_fn)
  			return -EINVAL;
  		handler = irq_default_primary_handler;
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1288

455357327   Thomas Gleixner   genirq: use kzall...
1289
  	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1290
1291
1292
1293
  	if (!action)
  		return -ENOMEM;
  
  	action->handler = handler;
3aa551c9b   Thomas Gleixner   genirq: add threa...
1294
  	action->thread_fn = thread_fn;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1295
  	action->flags = irqflags;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1296
  	action->name = devname;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1297
  	action->dev_id = dev_id;
3876ec9ef   Thomas Gleixner   genirq: Provide c...
1298
  	chip_bus_lock(desc);
d3c60047b   Thomas Gleixner   genirq: cleanup t...
1299
  	retval = __setup_irq(irq, desc, action);
3876ec9ef   Thomas Gleixner   genirq: Provide c...
1300
  	chip_bus_sync_unlock(desc);
70aedd24d   Thomas Gleixner   genirq: Add buslo...
1301

377bf1e4a   Anton Vorontsov   genirq: fix irq_d...
1302
1303
  	if (retval)
  		kfree(action);
6d83f94db   Thomas Gleixner   genirq: Disable t...
1304
  #ifdef CONFIG_DEBUG_SHIRQ_FIXME
6ce51c431   Luis Henriques   genirq: do not ex...
1305
  	if (!retval && (irqflags & IRQF_SHARED)) {
a304e1b82   David Woodhouse   [PATCH] Debug sha...
1306
1307
1308
  		/*
  		 * It's a shared IRQ -- the driver ought to be prepared for it
  		 * to happen immediately, so let's make sure....
377bf1e4a   Anton Vorontsov   genirq: fix irq_d...
1309
1310
  		 * We disable the irq to make sure that a 'real' IRQ doesn't
  		 * run in parallel with our fake.
a304e1b82   David Woodhouse   [PATCH] Debug sha...
1311
  		 */
59845b1ff   Jarek Poplawski   request_irq: fix ...
1312
  		unsigned long flags;
a304e1b82   David Woodhouse   [PATCH] Debug sha...
1313

377bf1e4a   Anton Vorontsov   genirq: fix irq_d...
1314
  		disable_irq(irq);
59845b1ff   Jarek Poplawski   request_irq: fix ...
1315
  		local_irq_save(flags);
377bf1e4a   Anton Vorontsov   genirq: fix irq_d...
1316

59845b1ff   Jarek Poplawski   request_irq: fix ...
1317
  		handler(irq, dev_id);
377bf1e4a   Anton Vorontsov   genirq: fix irq_d...
1318

59845b1ff   Jarek Poplawski   request_irq: fix ...
1319
  		local_irq_restore(flags);
377bf1e4a   Anton Vorontsov   genirq: fix irq_d...
1320
  		enable_irq(irq);
a304e1b82   David Woodhouse   [PATCH] Debug sha...
1321
1322
  	}
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1323
1324
  	return retval;
  }
3aa551c9b   Thomas Gleixner   genirq: add threa...
1325
  EXPORT_SYMBOL(request_threaded_irq);
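
Illustrative sketch, not part of the file above: the split primary/threaded handler pattern the kerneldoc describes. The primary handler only checks and masks the device and returns IRQ_WAKE_THREAD; the real work runs in the thread function. The device structure, register offsets and names are assumptions made up for the example.

/* Example only: hypothetical driver using request_threaded_irq(). */
#include <linux/interrupt.h>
#include <linux/io.h>

#define EXAMPLE_IRQ_STATUS	0x00	/* assumed register offsets */
#define EXAMPLE_IRQ_MASK	0x04

struct example_dev {
	void __iomem *regs;
	int irq;
};

static irqreturn_t example_hardirq(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;

	/* Shared line: check whether our device really raised the IRQ. */
	if (!readl(ed->regs + EXAMPLE_IRQ_STATUS))
		return IRQ_NONE;

	/* Mask it at the device and defer the heavy lifting to the thread. */
	writel(0, ed->regs + EXAMPLE_IRQ_MASK);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;

	/* Sleepable context: process the event, then unmask the device. */
	writel(~0U, ed->regs + EXAMPLE_IRQ_MASK);
	return IRQ_HANDLED;
}

static int example_probe(struct example_dev *ed)
{
	return request_threaded_irq(ed->irq, example_hardirq,
				    example_thread_fn, IRQF_SHARED,
				    "example-dev", ed);
}
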
ae731f8d0   Marc Zyngier   genirq: Introduce...
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
  
  /**
   *	request_any_context_irq - allocate an interrupt line
   *	@irq: Interrupt line to allocate
   *	@handler: Function to be called when the IRQ occurs.
   *		  Threaded handler for threaded interrupts.
   *	@flags: Interrupt type flags
   *	@name: An ascii name for the claiming device
   *	@dev_id: A cookie passed back to the handler function
   *
   *	This call allocates interrupt resources and enables the
   *	interrupt line and IRQ handling. It selects either a
   *	hardirq or threaded handling method depending on the
   *	context.
   *
   *	On failure, it returns a negative value. On success,
   *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
   */
  int request_any_context_irq(unsigned int irq, irq_handler_t handler,
  			    unsigned long flags, const char *name, void *dev_id)
  {
  	struct irq_desc *desc = irq_to_desc(irq);
  	int ret;
  
  	if (!desc)
  		return -EINVAL;
1ccb4e612   Thomas Gleixner   genirq: Wrap the ...
1352
  	if (irq_settings_is_nested_thread(desc)) {
ae731f8d0   Marc Zyngier   genirq: Introduce...
1353
1354
1355
1356
1357
1358
1359
1360
1361
  		ret = request_threaded_irq(irq, NULL, handler,
  					   flags, name, dev_id);
  		return !ret ? IRQC_IS_NESTED : ret;
  	}
  
  	ret = request_irq(irq, handler, flags, name, dev_id);
  	return !ret ? IRQC_IS_HARDIRQ : ret;
  }
  EXPORT_SYMBOL_GPL(request_any_context_irq);
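
Illustrative sketch, not part of the file above: a consumer that does not know whether its interrupt parent is a nested/threaded irq_chip (for instance behind an I2C expander), so it lets request_any_context_irq() decide and treats only negative values as errors. The function name and the IRQF_TRIGGER_FALLING choice are assumptions.

/* Example only: hypothetical use of request_any_context_irq(). */
#include <linux/interrupt.h>

static int example_setup_button_irq(int irq, irq_handler_t handler, void *data)
{
	int ret;

	ret = request_any_context_irq(irq, handler, IRQF_TRIGGER_FALLING,
				      "example-button", data);
	if (ret < 0)
		return ret;

	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; both mean success. */
	return 0;
}
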
31d9d9b6d   Marc Zyngier   genirq: Add suppo...
1362

1e7c5fd29   Marc Zyngier   genirq: percpu: a...
1363
  void enable_percpu_irq(unsigned int irq, unsigned int type)
31d9d9b6d   Marc Zyngier   genirq: Add suppo...
1364
1365
1366
1367
1368
1369
1370
  {
  	unsigned int cpu = smp_processor_id();
  	unsigned long flags;
  	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
  
  	if (!desc)
  		return;
1e7c5fd29   Marc Zyngier   genirq: percpu: a...
1371
1372
1373
1374
1375
1376
1377
  	type &= IRQ_TYPE_SENSE_MASK;
  	if (type != IRQ_TYPE_NONE) {
  		int ret;
  
  		ret = __irq_set_trigger(desc, irq, type);
  
  		if (ret) {
32cffdde4   Thomas Gleixner   genirq: Fix fatfi...
1378
1379
  			WARN(1, "failed to set type for IRQ%d
  ", irq);
1e7c5fd29   Marc Zyngier   genirq: percpu: a...
1380
1381
1382
  			goto out;
  		}
  	}
31d9d9b6d   Marc Zyngier   genirq: Add suppo...
1383
  	irq_percpu_enable(desc, cpu);
1e7c5fd29   Marc Zyngier   genirq: percpu: a...
1384
  out:
31d9d9b6d   Marc Zyngier   genirq: Add suppo...
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
  	irq_put_desc_unlock(desc, flags);
  }
  
  void disable_percpu_irq(unsigned int irq)
  {
  	unsigned int cpu = smp_processor_id();
  	unsigned long flags;
  	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
  
  	if (!desc)
  		return;
  
  	irq_percpu_disable(desc, cpu);
  	irq_put_desc_unlock(desc, flags);
  }
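
Illustrative sketch, not part of the file above: enable_percpu_irq() and disable_percpu_irq() act on the calling CPU's copy of the interrupt (they use smp_processor_id()), so a user typically invokes them from per-CPU bring-up and tear-down paths. The helper names and the IRQ_TYPE_NONE choice are assumptions.

/* Example only: hypothetical per-CPU bring-up/tear-down callbacks. */
#include <linux/interrupt.h>
#include <linux/irq.h>

static void example_cpu_starting(unsigned int example_ppi)
{
	/* Must run on the CPU that should start receiving the interrupt. */
	enable_percpu_irq(example_ppi, IRQ_TYPE_NONE);
}

static void example_cpu_dying(unsigned int example_ppi)
{
	/* Must run on the CPU that should stop receiving the interrupt. */
	disable_percpu_irq(example_ppi);
}
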
  
  /*
   * Internal function to unregister a percpu irqaction.
   */
  static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
  {
  	struct irq_desc *desc = irq_to_desc(irq);
  	struct irqaction *action;
  	unsigned long flags;
  
  	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
  
  	if (!desc)
  		return NULL;
  
  	raw_spin_lock_irqsave(&desc->lock, flags);
  
  	action = desc->action;
  	if (!action || action->percpu_dev_id != dev_id) {
  		WARN(1, "Trying to free already-free IRQ %d
  ", irq);
  		goto bad;
  	}
  
  	if (!cpumask_empty(desc->percpu_enabled)) {
  		WARN(1, "percpu IRQ %d still enabled on CPU%d!
  ",
  		     irq, cpumask_first(desc->percpu_enabled));
  		goto bad;
  	}
  
  	/* Found it - now remove it from the list of entries: */
  	desc->action = NULL;
  
  	raw_spin_unlock_irqrestore(&desc->lock, flags);
  
  	unregister_handler_proc(irq, action);
  
  	module_put(desc->owner);
  	return action;
  
  bad:
  	raw_spin_unlock_irqrestore(&desc->lock, flags);
  	return NULL;
  }
  
  /**
   *	remove_percpu_irq - free a per-cpu interrupt
   *	@irq: Interrupt line to free
   *	@act: irqaction for the interrupt
   *
   * Used to remove interrupts statically setup by the early boot process.
   */
  void remove_percpu_irq(unsigned int irq, struct irqaction *act)
  {
  	struct irq_desc *desc = irq_to_desc(irq);
  
  	if (desc && irq_settings_is_per_cpu_devid(desc))
  	    __free_percpu_irq(irq, act->percpu_dev_id);
  }
  
  /**
   *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
   *	@irq: Interrupt line to free
   *	@dev_id: Device identity to free
   *
   *	Remove a percpu interrupt handler. The handler is removed, but
   *	the interrupt line is not disabled. This must be done on each
   *	CPU before calling this function. The function does not return
   *	until any executing interrupts for this IRQ have completed.
   *
   *	This function must not be called from interrupt context.
   */
  void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
  {
  	struct irq_desc *desc = irq_to_desc(irq);
  
  	if (!desc || !irq_settings_is_per_cpu_devid(desc))
  		return;
  
  	chip_bus_lock(desc);
  	kfree(__free_percpu_irq(irq, dev_id));
  	chip_bus_sync_unlock(desc);
  }
  
  /**
   *	setup_percpu_irq - setup a per-cpu interrupt
   *	@irq: Interrupt line to setup
   *	@act: irqaction for the interrupt
   *
   * Used to statically setup per-cpu interrupts in the early boot process.
   */
  int setup_percpu_irq(unsigned int irq, struct irqaction *act)
  {
  	struct irq_desc *desc = irq_to_desc(irq);
  	int retval;
  
  	if (!desc || !irq_settings_is_per_cpu_devid(desc))
  		return -EINVAL;
  	chip_bus_lock(desc);
  	retval = __setup_irq(irq, desc, act);
  	chip_bus_sync_unlock(desc);
  
  	return retval;
  }
  
  /**
   *	request_percpu_irq - allocate a percpu interrupt line
   *	@irq: Interrupt line to allocate
   *	@handler: Function to be called when the IRQ occurs.
   *	@devname: An ascii name for the claiming device
   *	@dev_id: A percpu cookie passed back to the handler function
   *
   *	This call allocates interrupt resources, but doesn't
   *	automatically enable the interrupt. It has to be done on each
   *	CPU using enable_percpu_irq().
   *
   *	Dev_id must be globally unique. It is a per-cpu variable, and
   *	the handler gets called with the interrupted CPU's instance of
   *	that variable.
   */
  int request_percpu_irq(unsigned int irq, irq_handler_t handler,
  		       const char *devname, void __percpu *dev_id)
  {
  	struct irqaction *action;
  	struct irq_desc *desc;
  	int retval;
  
  	if (!dev_id)
  		return -EINVAL;
  
  	desc = irq_to_desc(irq);
  	if (!desc || !irq_settings_can_request(desc) ||
  	    !irq_settings_is_per_cpu_devid(desc))
  		return -EINVAL;
  
  	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
  	if (!action)
  		return -ENOMEM;
  
  	action->handler = handler;
2ed0e645f   Marc Zyngier   genirq: Don't all...
1542
  	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
31d9d9b6d   Marc Zyngier   genirq: Add suppo...
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
  	action->name = devname;
  	action->percpu_dev_id = dev_id;
  
  	chip_bus_lock(desc);
  	retval = __setup_irq(irq, desc, action);
  	chip_bus_sync_unlock(desc);
  
  	if (retval)
  		kfree(action);
  
  	return retval;
  }
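
Illustrative sketch, not part of the file above: tying the per-CPU API together. A driver requests the interrupt once with a __percpu cookie, each CPU then enables its copy locally with enable_percpu_irq(), and teardown reverses the steps. The state structure, names and flow are assumptions made up for the example.

/* Example only: hypothetical per-CPU counter using request_percpu_irq(). */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

struct example_pcpu_state {
	unsigned long ticks;
};

static struct example_pcpu_state __percpu *example_state;

static irqreturn_t example_pcpu_handler(int irq, void *dev_id)
{
	/* dev_id is this CPU's instance of the __percpu cookie. */
	struct example_pcpu_state *st = dev_id;

	st->ticks++;
	return IRQ_HANDLED;
}

static int __init example_pcpu_init(unsigned int ppi)
{
	int ret;

	example_state = alloc_percpu(struct example_pcpu_state);
	if (!example_state)
		return -ENOMEM;

	ret = request_percpu_irq(ppi, example_pcpu_handler,
				 "example-pcpu", example_state);
	if (ret) {
		free_percpu(example_state);
		return ret;
	}

	/* Each CPU still has to call enable_percpu_irq(ppi, ...) locally. */
	return 0;
}

static void example_pcpu_exit(unsigned int ppi)
{
	/* Every CPU must have called disable_percpu_irq(ppi) before this. */
	free_percpu_irq(ppi, example_state);
	free_percpu(example_state);
}
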