Commit b39898cd4077f4b6ec706e717c938751c34e1dc4
1 parent
9b66bfb280
Exists in
master
and in
16 other branches
genirq: Prevent spurious detection for unconditionally polled interrupts
On a 68k platform a couple of interrupts are demultiplexed and "polled" from a top level interrupt. Unfortunately there is no way to determine which of the sub interrupts raised the top level interrupt, so all of the demultiplexed interrupt handlers need to be invoked. Given a high enough frequency this can trigger the spurious interrupt detection mechanism, if one of the demultiplex interrupts returns IRQ_NONE continuously. But this is a false positive as the polling causes this behaviour and not buggy hardware/software. Introduce IRQ_POLLED which can be set at interrupt chip setup time via irq_set_status_flags(). The flag excludes the interrupt from the spurious detector and from all core polling activities. Reported-and-tested-by: Michael Schmitz <schmitzmic@gmail.com> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: linux-m68k@vger.kernel.org Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1311061149250.23353@ionos.tec.linutronix.de
Showing 3 changed files with 22 additions and 4 deletions Side-by-side Diff
include/linux/irq.h
... | ... | @@ -70,6 +70,9 @@ |
70 | 70 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context |
71 | 71 | * IRQ_NESTED_THREAD - Interrupt nests into another thread |
72 | 72 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable |
73 | + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude | |
74 | + * it from the spurious interrupt detection | |
75 | + * mechanism and from core side polling. | |
73 | 76 | */ |
74 | 77 | enum { |
75 | 78 | IRQ_TYPE_NONE = 0x00000000, |
76 | 79 | |
... | ... | @@ -94,12 +97,14 @@ |
94 | 97 | IRQ_NESTED_THREAD = (1 << 15), |
95 | 98 | IRQ_NOTHREAD = (1 << 16), |
96 | 99 | IRQ_PER_CPU_DEVID = (1 << 17), |
100 | + IRQ_IS_POLLED = (1 << 18), | |
97 | 101 | }; |
98 | 102 | |
99 | 103 | #define IRQF_MODIFY_MASK \ |
100 | 104 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ |
101 | 105 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ |
102 | - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) | |
106 | + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ | |
107 | + IRQ_IS_POLLED) | |
103 | 108 | |
104 | 109 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
105 | 110 |
kernel/irq/settings.h
... | ... | @@ -14,6 +14,7 @@ |
14 | 14 | _IRQ_NO_BALANCING = IRQ_NO_BALANCING, |
15 | 15 | _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, |
16 | 16 | _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, |
17 | + _IRQ_IS_POLLED = IRQ_IS_POLLED, | |
17 | 18 | _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, |
18 | 19 | }; |
19 | 20 | |
... | ... | @@ -26,6 +27,7 @@ |
26 | 27 | #define IRQ_NOAUTOEN GOT_YOU_MORON |
27 | 28 | #define IRQ_NESTED_THREAD GOT_YOU_MORON |
28 | 29 | #define IRQ_PER_CPU_DEVID GOT_YOU_MORON |
30 | +#define IRQ_IS_POLLED GOT_YOU_MORON | |
29 | 31 | #undef IRQF_MODIFY_MASK |
30 | 32 | #define IRQF_MODIFY_MASK GOT_YOU_MORON |
31 | 33 | |
... | ... | @@ -146,5 +148,10 @@ |
146 | 148 | static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) |
147 | 149 | { |
148 | 150 | return desc->status_use_accessors & _IRQ_NESTED_THREAD; |
151 | +} | |
152 | + | |
153 | +static inline bool irq_settings_is_polled(struct irq_desc *desc) | |
154 | +{ | |
155 | + return desc->status_use_accessors & _IRQ_IS_POLLED; | |
149 | 156 | } |
kernel/irq/spurious.c
... | ... | @@ -67,8 +67,13 @@ |
67 | 67 | |
68 | 68 | raw_spin_lock(&desc->lock); |
69 | 69 | |
70 | - /* PER_CPU and nested thread interrupts are never polled */ | |
71 | - if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc)) | |
70 | + /* | |
71 | * PER_CPU, nested thread interrupts and interrupts explicitly | |
72 | + * marked polled are excluded from polling. | |
73 | + */ | |
74 | + if (irq_settings_is_per_cpu(desc) || | |
75 | + irq_settings_is_nested_thread(desc) || | |
76 | + irq_settings_is_polled(desc)) | |
72 | 77 | goto out; |
73 | 78 | |
74 | 79 | /* |
... | ... | @@ -268,7 +273,8 @@ |
268 | 273 | void note_interrupt(unsigned int irq, struct irq_desc *desc, |
269 | 274 | irqreturn_t action_ret) |
270 | 275 | { |
271 | - if (desc->istate & IRQS_POLL_INPROGRESS) | |
276 | + if (desc->istate & IRQS_POLL_INPROGRESS || | |
277 | + irq_settings_is_polled(desc)) | |
272 | 278 | return; |
273 | 279 | |
274 | 280 | /* we get here again via the threaded handler */ |