  /*
   * linux/kernel/irq/spurious.c
   *
   * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
   *
   * This file contains spurious interrupt handling.
   */
  #include <linux/jiffies.h>
  #include <linux/irq.h>
  #include <linux/module.h>
  #include <linux/kallsyms.h>
  #include <linux/interrupt.h>
  #include <linux/moduleparam.h>
  #include <linux/timer.h>

  #include "internals.h"
  static int irqfixup __read_mostly;

  #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
  static void poll_spurious_irqs(unsigned long dummy);
  static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
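
  /*
   * poll_spurious_irqs() reruns the handlers of spuriously disabled IRQ
   * lines every POLL_SPURIOUS_IRQ_INTERVAL ticks (1/10 s). Only one
   * poller runs at a time; irq_poll_cpu records where, so
   * irq_wait_for_poll() can detect a handler invoked from the poller.
   */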
  static int irq_poll_cpu;
  static atomic_t irq_poll_active;

  /*
   * We wait here for a poller to finish.
   *
   * If the poll runs on this CPU, then we yell loudly and return
   * false. That will leave the interrupt line disabled in the worst
   * case, but it should never happen.
   *
   * We wait until the poller is done and then recheck disabled and
   * action (about to be disabled). Only if it's still active, we return
   * true and let the handler run.
   */
  bool irq_wait_for_poll(struct irq_desc *desc)
  {
  	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
  		      "irq poll in progress on cpu %d for irq %d
  ",
  		      smp_processor_id(), desc->irq_data.irq))
  		return false;
  
  #ifdef CONFIG_SMP
  	do {
  		raw_spin_unlock(&desc->lock);
  		while (irqd_irq_inprogress(&desc->irq_data))
  			cpu_relax();
  		raw_spin_lock(&desc->lock);
  	} while (irqd_irq_inprogress(&desc->irq_data));
  	/* Might have been disabled in the meantime */
  	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
  #else
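  	/* UP: a poller on this CPU was already caught by the check above */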
  	return false;
  #endif
  }

  /*
   * Recovery handler for misrouted interrupts.
   */
  static int try_one_irq(int irq, struct irq_desc *desc, bool force)
  {
  	irqreturn_t ret = IRQ_NONE;
  	struct irqaction *action;

  	raw_spin_lock(&desc->lock);

  	/*
  	 * PER_CPU, nested thread interrupts and interrupts explicitly
  	 * marked polled are excluded from polling.
  	 */
  	if (irq_settings_is_per_cpu(desc) ||
  	    irq_settings_is_nested_thread(desc) ||
  	    irq_settings_is_polled(desc))
  		goto out;
  
  	/*
  	 * Do not poll disabled interrupts unless the spurious
  	 * disabled poller asks explicitly.
  	 */
  	if (irqd_irq_disabled(&desc->irq_data) && !force)
  		goto out;
  
  	/*
  	 * All handlers must agree on IRQF_SHARED, so we test just the
  	 * first.
  	 */
  	action = desc->action;
  	if (!action || !(action->flags & IRQF_SHARED) ||
  	    (action->flags & __IRQF_TIMER))
  		goto out;
  	/* Already running on another processor */
  	if (irqd_irq_inprogress(&desc->irq_data)) {
  		/*
  		 * Already running: If it is shared get the other
  		 * CPU to go looking for our mystery interrupt too
  		 */
  		desc->istate |= IRQS_PENDING;
  		goto out;
  	}

  	/* Mark it poll in progress */
  	desc->istate |= IRQS_POLL_INPROGRESS;
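  	/* Replay until no further interrupt was marked pending while handling */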
  	do {
  		if (handle_irq_event(desc) == IRQ_HANDLED)
  			ret = IRQ_HANDLED;
  		/* Make sure that there is still a valid action */
  		action = desc->action;
  	} while ((desc->istate & IRQS_PENDING) && action);
  	desc->istate &= ~IRQS_POLL_INPROGRESS;
  out:
  	raw_spin_unlock(&desc->lock);
  	return ret == IRQ_HANDLED;
  }
  
  static int misrouted_irq(int irq)
  {
  	struct irq_desc *desc;
  	int i, ok = 0;

  	if (atomic_inc_return(&irq_poll_active) != 1)
  		goto out;
  
  	irq_poll_cpu = smp_processor_id();
  	for_each_irq_desc(i, desc) {
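  		/* irq 0 is the legacy PC timer; leave it alone */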
  		if (!i)
  			continue;
  
  		if (i == irq)	/* Already tried */
  			continue;
  		if (try_one_irq(i, desc, false))
  			ok = 1;
  	}
  out:
  	atomic_dec(&irq_poll_active);
  	/* So the caller can adjust the irq error counts */
  	return ok;
  }
  static void poll_spurious_irqs(unsigned long dummy)
  {
  	struct irq_desc *desc;
  	int i;

  	if (atomic_inc_return(&irq_poll_active) != 1)
  		goto out;
  	irq_poll_cpu = smp_processor_id();
  	for_each_irq_desc(i, desc) {
  		unsigned int state;

  		if (!i)
  			continue;
  		/* Racy but it doesn't matter */
  		state = desc->istate;
  		barrier();
  		if (!(state & IRQS_SPURIOUS_DISABLED))
  			continue;
  		local_irq_disable();
  		try_one_irq(i, desc, true);
  		local_irq_enable();
  	}
  out:
  	atomic_dec(&irq_poll_active);
  	mod_timer(&poll_spurious_irq_timer,
  		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
  }
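
  /*
   * Valid handler return values are combinations of IRQ_HANDLED and
   * IRQ_WAKE_THREAD (IRQ_NONE is zero); anything beyond that range is bogus.
   */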
  static inline int bad_action_ret(irqreturn_t action_ret)
  {
  	if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
  		return 0;
  	return 1;
  }
  /*
   * If 99,900 of the previous 100,000 interrupts have not been handled
   * then assume that the IRQ is stuck in some manner. Drop a diagnostic
   * and try to turn the IRQ off.
   *
   * (The other 100 of 100,000 interrupts may have been from a correctly
   *  functioning device sharing an IRQ with the failing one.)
   */
  static void
  __report_bad_irq(unsigned int irq, struct irq_desc *desc,
  		 irqreturn_t action_ret)
  {
  	struct irqaction *action;
  	unsigned long flags;

  	if (bad_action_ret(action_ret)) {
  		printk(KERN_ERR "irq event %d: bogus return value %x
  ",
  				irq, action_ret);
  	} else {
  		printk(KERN_ERR "irq %d: nobody cared (try booting with "
  				"the \"irqpoll\" option)
  ", irq);
  	}
  	dump_stack();
  	printk(KERN_ERR "handlers:
  ");

  	/*
  	 * We need to take desc->lock here. note_interrupt() is called
  	 * w/o desc->lock held, but IRQ_PROGRESS set. We might race
  	 * with something else removing an action. It's ok to take
  	 * desc->lock here. See synchronize_irq().
  	 */
  	raw_spin_lock_irqsave(&desc->lock, flags);
  	action = desc->action;
  	while (action) {
  		printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
  		if (action->thread_fn)
  			printk(KERN_CONT " threaded [<%p>] %pf",
  					action->thread_fn, action->thread_fn);
  		printk(KERN_CONT "
  ");
  		action = action->next;
  	}
  	raw_spin_unlock_irqrestore(&desc->lock, flags);
  }
  static void
  report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
  {
  	static int count = 100;
  
  	if (count > 0) {
  		count--;
  		__report_bad_irq(irq, desc, action_ret);
  	}
  }
  static inline int
  try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
  		  irqreturn_t action_ret)
  {
  	struct irqaction *action;
  
  	if (!irqfixup)
  		return 0;
  
  	/* We didn't actually handle the IRQ - see if it was misrouted? */
  	if (action_ret == IRQ_NONE)
  		return 1;
  
  	/*
  	 * But for 'irqfixup == 2' we also do it for handled interrupts if
  	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
  	 * traditional PC timer interrupt; legacy).
  	 */
  	if (irqfixup < 2)
  		return 0;
  
  	if (!irq)
  		return 1;
  
  	/*
  	 * Since we don't get the descriptor lock, "action" can
  	 * change under us.  We don't really care, but we don't
  	 * want to follow a NULL pointer. So tell the compiler to
  	 * just load it once by using a barrier.
  	 */
  	action = desc->action;
  	barrier();
  	return action && (action->flags & IRQF_IRQPOLL);
  }
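
  /*
   * Bit 31 of threads_handled_last flags that spurious accounting for the
   * threaded handlers was deferred to the next hardware interrupt (see
   * note_interrupt() below).
   */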
  #define SPURIOUS_DEFERRED	0x80000000
  void note_interrupt(unsigned int irq, struct irq_desc *desc,
  		    irqreturn_t action_ret)
  {
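  	/* Results produced by the poller must not skew the statistics */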
  	if (desc->istate & IRQS_POLL_INPROGRESS ||
  	    irq_settings_is_polled(desc))
  		return;
  	if (bad_action_ret(action_ret)) {
  		report_bad_irq(irq, desc, action_ret);
  		return;
  	}
  	/*
  	 * We cannot call note_interrupt from the threaded handler
  	 * because we need to look at the compound of all handlers
  	 * (primary and threaded). Apart from that, in the threaded
  	 * shared case we have no serialization against an incoming
  	 * hardware interrupt while we are dealing with a threaded
  	 * result.
  	 *
  	 * So in case a thread is woken, we just note the fact and
  	 * defer the analysis to the next hardware interrupt.
  	 *
  	 * The threaded handlers store whether they successfully
  	 * handled an interrupt and we check whether that number
  	 * changed versus the last invocation.
  	 *
  	 * We could handle all interrupts with the delayed-by-one
  	 * mechanism, but for the non forced threaded case we'd just
  	 * add pointless overhead to the straight hardirq interrupts
  	 * for the sake of a few lines less code.
  	 */
  	if (action_ret & IRQ_WAKE_THREAD) {
  		/*
  		 * There is a thread woken. Check whether one of the
  		 * shared primary handlers returned IRQ_HANDLED. If
  		 * not we defer the spurious detection to the next
  		 * interrupt.
  		 */
  		if (action_ret == IRQ_WAKE_THREAD) {
  			int handled;
  			/*
  			 * We use bit 31 of thread_handled_last to
  			 * denote the deferred spurious detection
  			 * active. No locking necessary as
  			 * thread_handled_last is only accessed here
  			 * and we have the guarantee that hard
  			 * interrupts are not reentrant.
  			 */
  			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
  				desc->threads_handled_last |= SPURIOUS_DEFERRED;
  				return;
  			}
  			/*
  			 * Check whether one of the threaded handlers
  			 * returned IRQ_HANDLED since the last
  			 * interrupt happened.
  			 *
  			 * For simplicity we just set bit 31, as it is
  			 * set in threads_handled_last as well. So we
  			 * avoid extra masking. And we really do not
  			 * care about the high bits of the handled
  			 * count. We just care about the count being
  			 * different than the one we saw before.
  			 */
  			handled = atomic_read(&desc->threads_handled);
  			handled |= SPURIOUS_DEFERRED;
  			if (handled != desc->threads_handled_last) {
  				action_ret = IRQ_HANDLED;
  				/*
  				 * Note: We keep the SPURIOUS_DEFERRED
  				 * bit set. We are handling the
  				 * previous invocation right now.
  				 * Keep it for the current one, so the
  				 * next hardware interrupt will
  				 * account for it.
  				 */
  				desc->threads_handled_last = handled;
  			} else {
  				/*
  				 * None of the threaded handlers felt
  				 * responsible for the last interrupt
  				 *
  				 * We keep the SPURIOUS_DEFERRED bit
  				 * set in threads_handled_last as we
  				 * need to account for the current
  				 * interrupt as well.
  				 */
  				action_ret = IRQ_NONE;
  			}
  		} else {
  			/*
  			 * One of the primary handlers returned
  			 * IRQ_HANDLED. So we don't care about the
  			 * threaded handlers on the same line. Clear
  			 * the deferred detection bit.
  			 *
  			 * In theory we could/should check whether the
  			 * deferred bit is set and take the result of
  			 * the previous run into account here as
  			 * well. But it's really not worth the
  			 * trouble. If every other interrupt is
  			 * handled we never trigger the spurious
  			 * detector. And if this is just the one out
  			 * of 100k unhandled ones which is handled
  				 * then we merely delay the spurious detection
  			 * by one hard interrupt. Not a real problem.
  			 */
  			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
  		}
  	}
  	if (unlikely(action_ret == IRQ_NONE)) {
  		/*
  		 * If we are seeing only the odd spurious IRQ caused by
  		 * bus asynchronicity then don't eventually trigger an error,
  		 * otherwise the counter becomes a doomsday timer for otherwise
  		 * working systems
  		 */
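  		/* More than 100ms (HZ/10) since the last one: restart the count */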
  		if (time_after(jiffies, desc->last_unhandled + HZ/10))
  			desc->irqs_unhandled = 1;
  		else
  			desc->irqs_unhandled++;
  		desc->last_unhandled = jiffies;
  	}
  	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
  		int ok = misrouted_irq(irq);
  		if (action_ret == IRQ_NONE)
  			desc->irqs_unhandled -= ok;
  	}
  	desc->irq_count++;
  	if (likely(desc->irq_count < 100000))
  		return;
  
  	desc->irq_count = 0;
  	if (unlikely(desc->irqs_unhandled > 99900)) {
  		/*
  		 * The interrupt is stuck
  		 */
  		__report_bad_irq(irq, desc, action_ret);
  		/*
  		 * Now kill the IRQ
  		 */
  		printk(KERN_EMERG "Disabling IRQ #%d
  ", irq);
  		desc->istate |= IRQS_SPURIOUS_DISABLED;
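  		/* Bump the disable depth: only an explicit enable_irq() revives it */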
  		desc->depth++;
  		irq_disable(desc);

  		mod_timer(&poll_spurious_irq_timer,
  			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
  	}
  	desc->irqs_unhandled = 0;
  }
  bool noirqdebug __read_mostly;

  int noirqdebug_setup(char *str)
  {
  	noirqdebug = 1;
  	printk(KERN_INFO "IRQ lockup detection disabled
  ");

  	return 1;
  }
  
  __setup("noirqdebug", noirqdebug_setup);
  module_param(noirqdebug, bool, 0644);
  MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

  static int __init irqfixup_setup(char *str)
  {
  	irqfixup = 1;
  	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.
  ");
  	printk(KERN_WARNING "This may impact system performance.
  ");

  	return 1;
  }
  
  __setup("irqfixup", irqfixup_setup);
  module_param(irqfixup, int, 0644);
  
  static int __init irqpoll_setup(char *str)
  {
  	irqfixup = 2;
  	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
  				"enabled
  ");
  	printk(KERN_WARNING "This may significantly impact system "
  				"performance
  ");
  	return 1;
  }
  
  __setup("irqpoll", irqpoll_setup);