Commit 4007162647b3b2e2e438904471b620aba013c4af
Exists in master and in 16 other branches
Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq cleanups from Ingo Molnar:

 "This is a multi-arch cleanup series from Thomas Gleixner, which we
  kept to near the end of the merge window, to not interfere with
  architecture updates.

  This series (motivated by the -rt kernel) unifies more aspects of IRQ
  handling and generalizes PREEMPT_ACTIVE"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  preempt: Make PREEMPT_ACTIVE generic
  sparc: Use preempt_schedule_irq
  ia64: Use preempt_schedule_irq
  m32r: Use preempt_schedule_irq
  hardirq: Make hardirq bits generic
  m68k: Simplify low level interrupt handling code
  genirq: Prevent spurious detection for unconditionally polled interrupts
Showing 50 changed files (side-by-side diff)
- arch/alpha/include/asm/thread_info.h
- arch/arc/include/asm/thread_info.h
- arch/arm/include/asm/thread_info.h
- arch/arm64/include/asm/thread_info.h
- arch/avr32/include/asm/thread_info.h
- arch/blackfin/include/asm/hardirq.h
- arch/blackfin/include/asm/thread_info.h
- arch/c6x/include/asm/thread_info.h
- arch/cris/include/asm/hardirq.h
- arch/cris/include/asm/thread_info.h
- arch/frv/include/asm/thread_info.h
- arch/hexagon/include/asm/thread_info.h
- arch/ia64/include/asm/thread_info.h
- arch/ia64/kernel/entry.S
- arch/m32r/include/asm/hardirq.h
- arch/m32r/include/asm/thread_info.h
- arch/m32r/kernel/entry.S
- arch/m68k/include/asm/hardirq.h
- arch/m68k/include/asm/thread_info.h
- arch/m68k/kernel/entry.S
- arch/m68k/kernel/ints.c
- arch/m68k/platform/68000/entry.S
- arch/m68k/platform/68360/entry.S
- arch/metag/include/asm/thread_info.h
- arch/microblaze/include/asm/thread_info.h
- arch/mips/include/asm/thread_info.h
- arch/mn10300/include/asm/thread_info.h
- arch/parisc/include/asm/thread_info.h
- arch/powerpc/include/asm/thread_info.h
- arch/s390/include/asm/hardirq.h
- arch/s390/include/asm/thread_info.h
- arch/score/include/asm/thread_info.h
- arch/sh/include/asm/thread_info.h
- arch/sh/kernel/entry-common.S
- arch/sparc/include/asm/hardirq_32.h
- arch/sparc/include/asm/hardirq_64.h
- arch/sparc/include/asm/thread_info_32.h
- arch/sparc/include/asm/thread_info_64.h
- arch/sparc/kernel/rtrap_64.S
- arch/tile/include/asm/hardirq.h
- arch/tile/include/asm/thread_info.h
- arch/um/include/asm/thread_info.h
- arch/unicore32/include/asm/thread_info.h
- arch/x86/include/asm/thread_info.h
- arch/xtensa/include/asm/thread_info.h
- include/linux/irq.h
- include/linux/preempt_mask.h
- include/linux/sched.h
- kernel/irq/settings.h
- kernel/irq/spurious.c
arch/alpha/include/asm/thread_info.h
arch/arc/include/asm/thread_info.h
arch/arm/include/asm/thread_info.h
... | ... | @@ -141,12 +141,6 @@ |
141 | 141 | #endif |
142 | 142 | |
143 | 143 | /* |
144 | - * We use bit 30 of the preempt_count to indicate that kernel | |
145 | - * preemption is occurring. See <asm/hardirq.h>. | |
146 | - */ | |
147 | -#define PREEMPT_ACTIVE 0x40000000 | |
148 | - | |
149 | -/* | |
150 | 144 | * thread information flags: |
151 | 145 | * TIF_SYSCALL_TRACE - syscall trace active |
152 | 146 | * TIF_SYSCAL_AUDIT - syscall auditing active |
arch/arm64/include/asm/thread_info.h
... | ... | @@ -89,12 +89,6 @@ |
89 | 89 | #endif |
90 | 90 | |
91 | 91 | /* |
92 | - * We use bit 30 of the preempt_count to indicate that kernel | |
93 | - * preemption is occurring. See <asm/hardirq.h>. | |
94 | - */ | |
95 | -#define PREEMPT_ACTIVE 0x40000000 | |
96 | - | |
97 | -/* | |
98 | 92 | * thread information flags: |
99 | 93 | * TIF_SYSCALL_TRACE - syscall trace active |
100 | 94 | * TIF_SIGPENDING - signal pending |
arch/avr32/include/asm/thread_info.h
arch/blackfin/include/asm/hardirq.h
arch/blackfin/include/asm/thread_info.h
arch/c6x/include/asm/thread_info.h
arch/cris/include/asm/hardirq.h
... | ... | @@ -2,18 +2,6 @@ |
2 | 2 | #define __ASM_HARDIRQ_H |
3 | 3 | |
4 | 4 | #include <asm/irq.h> |
5 | - | |
6 | -#define HARDIRQ_BITS 8 | |
7 | - | |
8 | -/* | |
9 | - * The hardirq mask has to be large enough to have | |
10 | - * space for potentially all IRQ sources in the system | |
11 | - * nesting on a single CPU: | |
12 | - */ | |
13 | -#if (1 << HARDIRQ_BITS) < NR_IRQS | |
14 | -# error HARDIRQ_BITS is too low! | |
15 | -#endif | |
16 | - | |
17 | 5 | #include <asm-generic/hardirq.h> |
18 | 6 | |
19 | 7 | #endif /* __ASM_HARDIRQ_H */ |
arch/cris/include/asm/thread_info.h
arch/frv/include/asm/thread_info.h
arch/hexagon/include/asm/thread_info.h
arch/ia64/include/asm/thread_info.h
arch/ia64/kernel/entry.S
... | ... | @@ -1169,21 +1169,8 @@ |
1169 | 1169 | .work_pending: |
1170 | 1170 | tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed? |
1171 | 1171 | (p6) br.cond.sptk.few .notify |
1172 | -#ifdef CONFIG_PREEMPT | |
1173 | -(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1 | |
1174 | - ;; | |
1175 | -(pKStk) st4 [r20]=r21 | |
1176 | -#endif | |
1177 | - SSM_PSR_I(p0, p6, r2) // enable interrupts | |
1178 | - br.call.spnt.many rp=schedule | |
1172 | + br.call.spnt.many rp=preempt_schedule_irq | |
1179 | 1173 | .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check) |
1180 | - RSM_PSR_I(p0, r2, r20) // disable interrupts | |
1181 | - ;; | |
1182 | -#ifdef CONFIG_PREEMPT | |
1183 | -(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 | |
1184 | - ;; | |
1185 | -(pKStk) st4 [r20]=r0 // preempt_count() <- 0 | |
1186 | -#endif | |
1187 | 1174 | (pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end |
1188 | 1175 | br.cond.sptk.many .work_processed_kernel |
1189 | 1176 |
arch/m32r/include/asm/hardirq.h
... | ... | @@ -3,22 +3,6 @@ |
3 | 3 | #define __ASM_HARDIRQ_H |
4 | 4 | |
5 | 5 | #include <asm/irq.h> |
6 | - | |
7 | -#if NR_IRQS > 256 | |
8 | -#define HARDIRQ_BITS 9 | |
9 | -#else | |
10 | -#define HARDIRQ_BITS 8 | |
11 | -#endif | |
12 | - | |
13 | -/* | |
14 | - * The hardirq mask has to be large enough to have | |
15 | - * space for potentially all IRQ sources in the system | |
16 | - * nesting on a single CPU: | |
17 | - */ | |
18 | -#if (1 << HARDIRQ_BITS) < NR_IRQS | |
19 | -# error HARDIRQ_BITS is too low! | |
20 | -#endif | |
21 | - | |
22 | 6 | #include <asm-generic/hardirq.h> |
23 | 7 | |
24 | 8 | #endif /* __ASM_HARDIRQ_H */ |
arch/m32r/include/asm/thread_info.h
arch/m32r/kernel/entry.S
... | ... | @@ -182,13 +182,7 @@ |
182 | 182 | ld r4, PSW(sp) ; interrupts off (exception path) ? |
183 | 183 | and3 r4, r4, #0x4000 |
184 | 184 | beqz r4, restore_all |
185 | - LDIMM (r4, PREEMPT_ACTIVE) | |
186 | - st r4, @(TI_PRE_COUNT, r8) | |
187 | - ENABLE_INTERRUPTS(r4) | |
188 | - bl schedule | |
189 | - ldi r4, #0 | |
190 | - st r4, @(TI_PRE_COUNT, r8) | |
191 | - DISABLE_INTERRUPTS(r4) | |
185 | + bl preempt_schedule_irq | |
192 | 186 | bra need_resched |
193 | 187 | #endif |
194 | 188 |
arch/m68k/include/asm/hardirq.h
... | ... | @@ -5,17 +5,6 @@ |
5 | 5 | #include <linux/cache.h> |
6 | 6 | #include <asm/irq.h> |
7 | 7 | |
8 | -#define HARDIRQ_BITS 8 | |
9 | - | |
10 | -/* | |
11 | - * The hardirq mask has to be large enough to have | |
12 | - * space for potentially all IRQ sources in the system | |
13 | - * nesting on a single CPU: | |
14 | - */ | |
15 | -#if (1 << HARDIRQ_BITS) < NR_IRQS | |
16 | -# error HARDIRQ_BITS is too low! | |
17 | -#endif | |
18 | - | |
19 | 8 | #ifdef CONFIG_MMU |
20 | 9 | |
21 | 10 | static inline void ack_bad_irq(unsigned int irq) |
arch/m68k/include/asm/thread_info.h
arch/m68k/kernel/entry.S
... | ... | @@ -45,7 +45,7 @@ |
45 | 45 | .globl system_call, buserr, trap, resume |
46 | 46 | .globl sys_call_table |
47 | 47 | .globl __sys_fork, __sys_clone, __sys_vfork |
48 | -.globl ret_from_interrupt, bad_interrupt | |
48 | +.globl bad_interrupt | |
49 | 49 | .globl auto_irqhandler_fixup |
50 | 50 | .globl user_irqvec_fixup |
51 | 51 | |
... | ... | @@ -275,8 +275,6 @@ |
275 | 275 | ENTRY(auto_inthandler) |
276 | 276 | SAVE_ALL_INT |
277 | 277 | GET_CURRENT(%d0) |
278 | - movel %d0,%a1 | |
279 | - addqb #1,%a1@(TINFO_PREEMPT+1) | |
280 | 278 | | put exception # in d0 |
281 | 279 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 |
282 | 280 | subw #VEC_SPUR,%d0 |
283 | 281 | |
284 | 282 | |
... | ... | @@ -286,32 +284,13 @@ |
286 | 284 | auto_irqhandler_fixup = . + 2 |
287 | 285 | jsr do_IRQ | process the IRQ |
288 | 286 | addql #8,%sp | pop parameters off stack |
287 | + jra ret_from_exception | |
289 | 288 | |
290 | -ret_from_interrupt: | |
291 | - movel %curptr@(TASK_STACK),%a1 | |
292 | - subqb #1,%a1@(TINFO_PREEMPT+1) | |
293 | - jeq ret_from_last_interrupt | |
294 | -2: RESTORE_ALL | |
295 | - | |
296 | - ALIGN | |
297 | -ret_from_last_interrupt: | |
298 | - moveq #(~ALLOWINT>>8)&0xff,%d0 | |
299 | - andb %sp@(PT_OFF_SR),%d0 | |
300 | - jne 2b | |
301 | - | |
302 | - /* check if we need to do software interrupts */ | |
303 | - tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING | |
304 | - jeq .Lret_from_exception | |
305 | - pea ret_from_exception | |
306 | - jra do_softirq | |
307 | - | |
308 | 289 | /* Handler for user defined interrupt vectors */ |
309 | 290 | |
310 | 291 | ENTRY(user_inthandler) |
311 | 292 | SAVE_ALL_INT |
312 | 293 | GET_CURRENT(%d0) |
313 | - movel %d0,%a1 | |
314 | - addqb #1,%a1@(TINFO_PREEMPT+1) | |
315 | 294 | | put exception # in d0 |
316 | 295 | bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 |
317 | 296 | user_irqvec_fixup = . + 2 |
318 | 297 | |
319 | 298 | |
320 | 299 | |
... | ... | @@ -321,29 +300,18 @@ |
321 | 300 | movel %d0,%sp@- | put vector # on stack |
322 | 301 | jsr do_IRQ | process the IRQ |
323 | 302 | addql #8,%sp | pop parameters off stack |
303 | + jra ret_from_exception | |
324 | 304 | |
325 | - movel %curptr@(TASK_STACK),%a1 | |
326 | - subqb #1,%a1@(TINFO_PREEMPT+1) | |
327 | - jeq ret_from_last_interrupt | |
328 | - RESTORE_ALL | |
329 | - | |
330 | 305 | /* Handler for uninitialized and spurious interrupts */ |
331 | 306 | |
332 | 307 | ENTRY(bad_inthandler) |
333 | 308 | SAVE_ALL_INT |
334 | 309 | GET_CURRENT(%d0) |
335 | - movel %d0,%a1 | |
336 | - addqb #1,%a1@(TINFO_PREEMPT+1) | |
337 | 310 | |
338 | 311 | movel %sp,%sp@- |
339 | 312 | jsr handle_badint |
340 | 313 | addql #4,%sp |
341 | - | |
342 | - movel %curptr@(TASK_STACK),%a1 | |
343 | - subqb #1,%a1@(TINFO_PREEMPT+1) | |
344 | - jeq ret_from_last_interrupt | |
345 | - RESTORE_ALL | |
346 | - | |
314 | + jra ret_from_exception | |
347 | 315 | |
348 | 316 | resume: |
349 | 317 | /* |
arch/m68k/kernel/ints.c
... | ... | @@ -58,12 +58,6 @@ |
58 | 58 | { |
59 | 59 | int i; |
60 | 60 | |
61 | - /* assembly irq entry code relies on this... */ | |
62 | - if (HARDIRQ_MASK != 0x00ff0000) { | |
63 | - extern void hardirq_mask_is_broken(void); | |
64 | - hardirq_mask_is_broken(); | |
65 | - } | |
66 | - | |
67 | 61 | for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++) |
68 | 62 | irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq); |
69 | 63 |
arch/m68k/platform/68000/entry.S
... | ... | @@ -27,7 +27,6 @@ |
27 | 27 | .globl ret_from_exception |
28 | 28 | .globl ret_from_signal |
29 | 29 | .globl sys_call_table |
30 | -.globl ret_from_interrupt | |
31 | 30 | .globl bad_interrupt |
32 | 31 | .globl inthandler1 |
33 | 32 | .globl inthandler2 |
... | ... | @@ -137,7 +136,7 @@ |
137 | 136 | movel #65,%sp@- /* put vector # on stack*/ |
138 | 137 | jbsr process_int /* process the IRQ*/ |
139 | 138 | 3: addql #8,%sp /* pop parameters off stack*/ |
140 | - bra ret_from_interrupt | |
139 | + bra ret_from_exception | |
141 | 140 | |
142 | 141 | inthandler2: |
143 | 142 | SAVE_ALL_INT |
... | ... | @@ -148,7 +147,7 @@ |
148 | 147 | movel #66,%sp@- /* put vector # on stack*/ |
149 | 148 | jbsr process_int /* process the IRQ*/ |
150 | 149 | 3: addql #8,%sp /* pop parameters off stack*/ |
151 | - bra ret_from_interrupt | |
150 | + bra ret_from_exception | |
152 | 151 | |
153 | 152 | inthandler3: |
154 | 153 | SAVE_ALL_INT |
... | ... | @@ -159,7 +158,7 @@ |
159 | 158 | movel #67,%sp@- /* put vector # on stack*/ |
160 | 159 | jbsr process_int /* process the IRQ*/ |
161 | 160 | 3: addql #8,%sp /* pop parameters off stack*/ |
162 | - bra ret_from_interrupt | |
161 | + bra ret_from_exception | |
163 | 162 | |
164 | 163 | inthandler4: |
165 | 164 | SAVE_ALL_INT |
... | ... | @@ -170,7 +169,7 @@ |
170 | 169 | movel #68,%sp@- /* put vector # on stack*/ |
171 | 170 | jbsr process_int /* process the IRQ*/ |
172 | 171 | 3: addql #8,%sp /* pop parameters off stack*/ |
173 | - bra ret_from_interrupt | |
172 | + bra ret_from_exception | |
174 | 173 | |
175 | 174 | inthandler5: |
176 | 175 | SAVE_ALL_INT |
... | ... | @@ -181,7 +180,7 @@ |
181 | 180 | movel #69,%sp@- /* put vector # on stack*/ |
182 | 181 | jbsr process_int /* process the IRQ*/ |
183 | 182 | 3: addql #8,%sp /* pop parameters off stack*/ |
184 | - bra ret_from_interrupt | |
183 | + bra ret_from_exception | |
185 | 184 | |
186 | 185 | inthandler6: |
187 | 186 | SAVE_ALL_INT |
... | ... | @@ -192,7 +191,7 @@ |
192 | 191 | movel #70,%sp@- /* put vector # on stack*/ |
193 | 192 | jbsr process_int /* process the IRQ*/ |
194 | 193 | 3: addql #8,%sp /* pop parameters off stack*/ |
195 | - bra ret_from_interrupt | |
194 | + bra ret_from_exception | |
196 | 195 | |
197 | 196 | inthandler7: |
198 | 197 | SAVE_ALL_INT |
... | ... | @@ -203,7 +202,7 @@ |
203 | 202 | movel #71,%sp@- /* put vector # on stack*/ |
204 | 203 | jbsr process_int /* process the IRQ*/ |
205 | 204 | 3: addql #8,%sp /* pop parameters off stack*/ |
206 | - bra ret_from_interrupt | |
205 | + bra ret_from_exception | |
207 | 206 | |
208 | 207 | inthandler: |
209 | 208 | SAVE_ALL_INT |
... | ... | @@ -214,23 +213,7 @@ |
214 | 213 | movel %d0,%sp@- /* put vector # on stack*/ |
215 | 214 | jbsr process_int /* process the IRQ*/ |
216 | 215 | 3: addql #8,%sp /* pop parameters off stack*/ |
217 | - bra ret_from_interrupt | |
218 | - | |
219 | -ret_from_interrupt: | |
220 | - jeq 1f | |
221 | -2: | |
222 | - RESTORE_ALL | |
223 | -1: | |
224 | - moveb %sp@(PT_OFF_SR), %d0 | |
225 | - and #7, %d0 | |
226 | - jhi 2b | |
227 | - | |
228 | - /* check if we need to do software interrupts */ | |
229 | - jeq ret_from_exception | |
230 | - | |
231 | - pea ret_from_exception | |
232 | - jra do_softirq | |
233 | - | |
216 | + bra ret_from_exception | |
234 | 217 | |
235 | 218 | /* |
236 | 219 | * Handler for uninitialized and spurious interrupts. |
arch/m68k/platform/68360/entry.S
... | ... | @@ -29,7 +29,6 @@ |
29 | 29 | .globl ret_from_exception |
30 | 30 | .globl ret_from_signal |
31 | 31 | .globl sys_call_table |
32 | -.globl ret_from_interrupt | |
33 | 32 | .globl bad_interrupt |
34 | 33 | .globl inthandler |
35 | 34 | |
... | ... | @@ -132,26 +131,9 @@ |
132 | 131 | |
133 | 132 | movel %sp,%sp@- |
134 | 133 | movel %d0,%sp@- /* put vector # on stack*/ |
135 | - jbsr do_IRQ /* process the IRQ*/ | |
136 | -3: addql #8,%sp /* pop parameters off stack*/ | |
137 | - bra ret_from_interrupt | |
138 | - | |
139 | -ret_from_interrupt: | |
140 | - jeq 1f | |
141 | -2: | |
142 | - RESTORE_ALL | |
143 | -1: | |
144 | - moveb %sp@(PT_OFF_SR), %d0 | |
145 | - and #7, %d0 | |
146 | - jhi 2b | |
147 | - /* check if we need to do software interrupts */ | |
148 | - | |
149 | - movel irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0 | |
150 | - jeq ret_from_exception | |
151 | - | |
152 | - pea ret_from_exception | |
153 | - jra do_softirq | |
154 | - | |
134 | + jbsr do_IRQ /* process the IRQ */ | |
135 | + addql #8,%sp /* pop parameters off stack*/ | |
136 | + jra ret_from_exception | |
155 | 137 | |
156 | 138 | /* |
157 | 139 | * Handler for uninitialized and spurious interrupts. |
arch/metag/include/asm/thread_info.h
arch/microblaze/include/asm/thread_info.h
arch/mips/include/asm/thread_info.h
arch/mn10300/include/asm/thread_info.h
arch/parisc/include/asm/thread_info.h
arch/powerpc/include/asm/thread_info.h
arch/s390/include/asm/hardirq.h
arch/s390/include/asm/thread_info.h
arch/score/include/asm/thread_info.h
arch/sh/include/asm/thread_info.h
arch/sh/kernel/entry-common.S
... | ... | @@ -108,7 +108,7 @@ |
108 | 108 | and #(0xf0>>1), r0 ! interrupts off (exception path)? |
109 | 109 | cmp/eq #(0xf0>>1), r0 |
110 | 110 | bt noresched |
111 | - mov.l 3f, r0 | |
111 | + mov.l 1f, r0 | |
112 | 112 | jsr @r0 ! call preempt_schedule_irq |
113 | 113 | nop |
114 | 114 | bra need_resched |
... | ... | @@ -119,9 +119,7 @@ |
119 | 119 | nop |
120 | 120 | |
121 | 121 | .align 2 |
122 | -1: .long PREEMPT_ACTIVE | |
123 | -2: .long schedule | |
124 | -3: .long preempt_schedule_irq | |
122 | +1: .long preempt_schedule_irq | |
125 | 123 | #endif |
126 | 124 | |
127 | 125 | ENTRY(resume_userspace) |
arch/sparc/include/asm/hardirq_32.h
arch/sparc/include/asm/hardirq_64.h
arch/sparc/include/asm/thread_info_32.h
arch/sparc/include/asm/thread_info_64.h
arch/sparc/kernel/rtrap_64.S
... | ... | @@ -312,12 +312,10 @@ |
312 | 312 | nop |
313 | 313 | cmp %l4, 0 |
314 | 314 | bne,pn %xcc, kern_fpucheck |
315 | - sethi %hi(PREEMPT_ACTIVE), %l6 | |
316 | - stw %l6, [%g6 + TI_PRE_COUNT] | |
317 | - call schedule | |
318 | 315 | nop |
316 | + call preempt_schedule_irq | |
317 | + nop | |
319 | 318 | ba,pt %xcc, rtrap |
320 | - stw %g0, [%g6 + TI_PRE_COUNT] | |
321 | 319 | #endif |
322 | 320 | kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 |
323 | 321 | brz,pt %l5, rt_continue |
arch/tile/include/asm/hardirq.h
arch/tile/include/asm/thread_info.h
arch/um/include/asm/thread_info.h
arch/unicore32/include/asm/thread_info.h
... | ... | @@ -118,12 +118,6 @@ |
118 | 118 | #endif |
119 | 119 | |
120 | 120 | /* |
121 | - * We use bit 30 of the preempt_count to indicate that kernel | |
122 | - * preemption is occurring. See <asm/hardirq.h>. | |
123 | - */ | |
124 | -#define PREEMPT_ACTIVE 0x40000000 | |
125 | - | |
126 | -/* | |
127 | 121 | * thread information flags: |
128 | 122 | * TIF_SYSCALL_TRACE - syscall trace active |
129 | 123 | * TIF_SIGPENDING - signal pending |
arch/x86/include/asm/thread_info.h
arch/xtensa/include/asm/thread_info.h
include/linux/irq.h
... | ... | @@ -70,6 +70,9 @@ |
70 | 70 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context |
71 | 71 | * IRQ_NESTED_TRHEAD - Interrupt nests into another thread |
72 | 72 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable |
73 | + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude | |
74 | + * it from the spurious interrupt detection | |
75 | + * mechanism and from core side polling. | |
73 | 76 | */ |
74 | 77 | enum { |
75 | 78 | IRQ_TYPE_NONE = 0x00000000, |
76 | 79 | |
... | ... | @@ -94,12 +97,14 @@ |
94 | 97 | IRQ_NESTED_THREAD = (1 << 15), |
95 | 98 | IRQ_NOTHREAD = (1 << 16), |
96 | 99 | IRQ_PER_CPU_DEVID = (1 << 17), |
100 | + IRQ_IS_POLLED = (1 << 18), | |
97 | 101 | }; |
98 | 102 | |
99 | 103 | #define IRQF_MODIFY_MASK \ |
100 | 104 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ |
101 | 105 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ |
102 | - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) | |
106 | + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ | |
107 | + IRQ_IS_POLLED) | |
103 | 108 | |
104 | 109 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
105 | 110 |
include/linux/preempt_mask.h
... | ... | @@ -11,36 +11,23 @@ |
11 | 11 | * - bits 0-7 are the preemption count (max preemption depth: 256) |
12 | 12 | * - bits 8-15 are the softirq count (max # of softirqs: 256) |
13 | 13 | * |
14 | - * The hardirq count can in theory reach the same as NR_IRQS. | |
15 | - * In reality, the number of nested IRQS is limited to the stack | |
16 | - * size as well. For archs with over 1000 IRQS it is not practical | |
17 | - * to expect that they will all nest. We give a max of 10 bits for | |
18 | - * hardirq nesting. An arch may choose to give less than 10 bits. | |
19 | - * m68k expects it to be 8. | |
14 | + * The hardirq count could in theory be the same as the number of | |
15 | + * interrupts in the system, but we run all interrupt handlers with | |
16 | + * interrupts disabled, so we cannot have nesting interrupts. Though | |
17 | + * there are a few palaeontologic drivers which reenable interrupts in | |
18 | + * the handler, so we need more than one bit here. | |
20 | 19 | * |
21 | - * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) | |
22 | - * - bit 26 is the NMI_MASK | |
23 | - * - bit 27 is the PREEMPT_ACTIVE flag | |
24 | - * | |
25 | - * PREEMPT_MASK: 0x000000ff | |
26 | - * SOFTIRQ_MASK: 0x0000ff00 | |
27 | - * HARDIRQ_MASK: 0x03ff0000 | |
28 | - * NMI_MASK: 0x04000000 | |
20 | + * PREEMPT_MASK: 0x000000ff | |
21 | + * SOFTIRQ_MASK: 0x0000ff00 | |
22 | + * HARDIRQ_MASK: 0x000f0000 | |
23 | + * NMI_MASK: 0x00100000 | |
24 | + * PREEMPT_ACTIVE: 0x00200000 | |
29 | 25 | */ |
30 | 26 | #define PREEMPT_BITS 8 |
31 | 27 | #define SOFTIRQ_BITS 8 |
28 | +#define HARDIRQ_BITS 4 | |
32 | 29 | #define NMI_BITS 1 |
33 | 30 | |
34 | -#define MAX_HARDIRQ_BITS 10 | |
35 | - | |
36 | -#ifndef HARDIRQ_BITS | |
37 | -# define HARDIRQ_BITS MAX_HARDIRQ_BITS | |
38 | -#endif | |
39 | - | |
40 | -#if HARDIRQ_BITS > MAX_HARDIRQ_BITS | |
41 | -#error HARDIRQ_BITS too high! | |
42 | -#endif | |
43 | - | |
44 | 31 | #define PREEMPT_SHIFT 0 |
45 | 32 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) |
46 | 33 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) |
47 | 34 | |
... | ... | @@ -60,15 +47,9 @@ |
60 | 47 | |
61 | 48 | #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) |
62 | 49 | |
63 | -#ifndef PREEMPT_ACTIVE | |
64 | 50 | #define PREEMPT_ACTIVE_BITS 1 |
65 | 51 | #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) |
66 | 52 | #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) |
67 | -#endif | |
68 | - | |
69 | -#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS)) | |
70 | -#error PREEMPT_ACTIVE is too low! | |
71 | -#endif | |
72 | 53 | |
73 | 54 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK) |
74 | 55 | #define softirq_count() (preempt_count() & SOFTIRQ_MASK) |
include/linux/sched.h
kernel/irq/settings.h
... | ... | @@ -14,6 +14,7 @@ |
14 | 14 | _IRQ_NO_BALANCING = IRQ_NO_BALANCING, |
15 | 15 | _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, |
16 | 16 | _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, |
17 | + _IRQ_IS_POLLED = IRQ_IS_POLLED, | |
17 | 18 | _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, |
18 | 19 | }; |
19 | 20 | |
... | ... | @@ -26,6 +27,7 @@ |
26 | 27 | #define IRQ_NOAUTOEN GOT_YOU_MORON |
27 | 28 | #define IRQ_NESTED_THREAD GOT_YOU_MORON |
28 | 29 | #define IRQ_PER_CPU_DEVID GOT_YOU_MORON |
30 | +#define IRQ_IS_POLLED GOT_YOU_MORON | |
29 | 31 | #undef IRQF_MODIFY_MASK |
30 | 32 | #define IRQF_MODIFY_MASK GOT_YOU_MORON |
31 | 33 | |
... | ... | @@ -146,5 +148,10 @@ |
146 | 148 | static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) |
147 | 149 | { |
148 | 150 | return desc->status_use_accessors & _IRQ_NESTED_THREAD; |
151 | +} | |
152 | + | |
153 | +static inline bool irq_settings_is_polled(struct irq_desc *desc) | |
154 | +{ | |
155 | + return desc->status_use_accessors & _IRQ_IS_POLLED; | |
149 | 156 | } |
kernel/irq/spurious.c
... | ... | @@ -67,8 +67,13 @@ |
67 | 67 | |
68 | 68 | raw_spin_lock(&desc->lock); |
69 | 69 | |
70 | - /* PER_CPU and nested thread interrupts are never polled */ | |
71 | - if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc)) | |
70 | + /* | |
71 | + * PER_CPU, nested thread interrupts and interrupts explicitely | |
72 | + * marked polled are excluded from polling. | |
73 | + */ | |
74 | + if (irq_settings_is_per_cpu(desc) || | |
75 | + irq_settings_is_nested_thread(desc) || | |
76 | + irq_settings_is_polled(desc)) | |
72 | 77 | goto out; |
73 | 78 | |
74 | 79 | /* |
... | ... | @@ -268,7 +273,8 @@ |
268 | 273 | void note_interrupt(unsigned int irq, struct irq_desc *desc, |
269 | 274 | irqreturn_t action_ret) |
270 | 275 | { |
271 | - if (desc->istate & IRQS_POLL_INPROGRESS) | |
276 | + if (desc->istate & IRQS_POLL_INPROGRESS || | |
277 | + irq_settings_is_polled(desc)) | |
272 | 278 | return; |
273 | 279 | |
274 | 280 | /* we get here again via the threaded handler */ |