kernel/irq/spurious.c

  /*
   * linux/kernel/irq/spurious.c
   *
   * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
   *
   * This file contains spurious interrupt handling.
   */
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;
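
/*
 * Polling machinery for spurious/misrouted interrupts: irq_poll_active
 * ensures that only one poller runs at a time, and irq_poll_cpu records
 * the CPU it runs on so irq_wait_for_poll() can detect an interrupt
 * being handled on the polling CPU itself.
 */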
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
static int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it's still active, we return
 * true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
		      "irq poll in progress on cpu %d for irq %d\n",
		      smp_processor_id(), desc->irq_data.irq))
		return false;

#ifdef CONFIG_SMP
	do {
		raw_spin_unlock(&desc->lock);
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();
		raw_spin_lock(&desc->lock);
	} while (irqd_irq_inprogress(&desc->irq_data));
	/* Might have been disabled in meantime */
	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
	return false;
#endif
}

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
	irqreturn_t ret = IRQ_NONE;
	struct irqaction *action;

	raw_spin_lock(&desc->lock);

	/* PER_CPU and nested thread interrupts are never polled */
	if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
		goto out;

	/*
	 * Do not poll disabled interrupts unless the spurious
	 * disabled poller asks explicitly.
	 */
	if (irqd_irq_disabled(&desc->irq_data) && !force)
		goto out;

	/*
	 * All handlers must agree on IRQF_SHARED, so we test just the
	 * first. Check for action->next as well.
	 */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER) ||
	    (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
	    !action->next)
		goto out;
	/* Already running on another processor */
	if (irqd_irq_inprogress(&desc->irq_data)) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		desc->istate |= IRQS_PENDING;
		goto out;
	}

	/* Mark it poll in progress */
	desc->istate |= IRQS_POLL_INPROGRESS;
	do {
		if (handle_irq_event(desc) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
		action = desc->action;
	} while ((desc->istate & IRQS_PENDING) && action);
	desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
	raw_spin_unlock(&desc->lock);
	return ret == IRQ_HANDLED;
}
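
/*
 * Poll all other interrupt lines to find the handler that really
 * services a misrouted interrupt. Returns 1 if some poll reported
 * IRQ_HANDLED so the caller can adjust its unhandled-IRQ count.
 */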
  
static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();
	for_each_irq_desc(i, desc) {
		if (!i)
			 continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(i, desc, false))
			ok = 1;
	}
out:
	atomic_dec(&irq_poll_active);
	/* So the caller can adjust the irq error counts */
	return ok;
}
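
/*
 * Timer callback: periodically re-poll interrupt lines that were shut
 * down as spurious (IRQS_SPURIOUS_DISABLED) so a wedged line gets
 * another chance to be serviced, then re-arm the timer.
 */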
static void poll_spurious_irqs(unsigned long dummy)
{
	struct irq_desc *desc;
	int i;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();
	for_each_irq_desc(i, desc) {
		unsigned int state;

		if (!i)
			 continue;
		/* Racy but it doesn't matter */
		state = desc->istate;
		barrier();
		if (!(state & IRQS_SPURIOUS_DISABLED))
			continue;
		local_irq_disable();
		try_one_irq(i, desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
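
/*
 * Anything other than IRQ_NONE, IRQ_HANDLED, IRQ_WAKE_THREAD or a
 * combination of those bits is a bogus handler return value.
 */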
  static inline int bad_action_ret(irqreturn_t action_ret)
  {
  	if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
  		return 0;
  	return 1;
  }

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 */
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
		 irqreturn_t action_ret)
{
	struct irqaction *action;
	unsigned long flags;

	if (bad_action_ret(action_ret)) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * w/o desc->lock held, but IRQ_PROGRESS set. We might race
	 * with something else removing an action. It's ok to take
	 * desc->lock here. See synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
		if (action->thread_fn)
			printk(KERN_CONT " threaded [<%p>] %pf",
					action->thread_fn, action->thread_fn);
		printk(KERN_CONT "\n");
		action = action->next;
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
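
/*
 * Rate-limited wrapper around __report_bad_irq(): only the first 100
 * bad events are reported to keep the log from being flooded.
 */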
static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}
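
/*
 * Decide whether note_interrupt() should invoke the misrouted-IRQ poll,
 * based on the "irqfixup"/"irqpoll" level set on the command line.
 */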
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
  {
  	struct irqaction *action;
  
  	if (!irqfixup)
  		return 0;
  
  	/* We didn't actually handle the IRQ - see if it was misrouted? */
  	if (action_ret == IRQ_NONE)
  		return 1;
  
  	/*
  	 * But for 'irqfixup == 2' we also do it for handled interrupts if
  	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
  	 * traditional PC timer interrupt.. Legacy)
  	 */
  	if (irqfixup < 2)
  		return 0;
  
  	if (!irq)
  		return 1;
  
  	/*
  	 * Since we don't get the descriptor lock, "action" can
  	 * change under us.  We don't really care, but we don't
  	 * want to follow a NULL pointer. So tell the compiler to
  	 * just load it once by using a barrier.
  	 */
  	action = desc->action;
  	barrier();
  	return action && (action->flags & IRQF_IRQPOLL);
  }
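
/*
 * Called by the interrupt flow handlers after each handler invocation
 * (unless "noirqdebug" is set) to account for unhandled interrupts and
 * to report and disable an interrupt line that appears to be stuck.
 */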
void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
	if (desc->istate & IRQS_POLL_INPROGRESS)
		return;

	/* we get here again via the threaded handler */
	if (action_ret == IRQ_WAKE_THREAD)
		return;

	if (bad_action_ret(action_ret)) {
		report_bad_irq(irq, desc, action_ret);
		return;
	}

	if (unlikely(action_ret == IRQ_NONE)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an error,
		 * otherwise the counter becomes a doomsday timer for otherwise
		 * working systems
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
	}

	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->istate |= IRQS_SPURIOUS_DISABLED;
		desc->depth++;
		irq_disable(desc);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}

bool noirqdebug __read_mostly;
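
/*
 * "noirqdebug" boot parameter: switch off the unhandled-interrupt
 * accounting done in note_interrupt() above.
 */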

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
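
/*
 * "irqfixup" boot parameter: when an interrupt is not handled, poll the
 * other interrupt lines to recover from misrouted interrupts.
 */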

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
  module_param(irqfixup, int, 0644);
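
/*
 * "irqpoll" boot parameter: like "irqfixup", but additionally polls on
 * handled interrupts marked IRQF_IRQPOLL (and on irq 0, the legacy PC
 * timer), at a further cost in performance.
 */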
  
static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
				"enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
				"performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);