Commit 4007162647b3b2e2e438904471b620aba013c4af

Authored by Linus Torvalds

Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq cleanups from Ingo Molnar:
 "This is a multi-arch cleanup series from Thomas Gleixner, which we
  kept to near the end of the merge window, to not interfere with
  architecture updates.

  This series (motivated by the -rt kernel) unifies more aspects of IRQ
  handling and generalizes PREEMPT_ACTIVE"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  preempt: Make PREEMPT_ACTIVE generic
  sparc: Use preempt_schedule_irq
  ia64: Use preempt_schedule_irq
  m32r: Use preempt_schedule_irq
  hardirq: Make hardirq bits generic
  m68k: Simplify low level interrupt handling code
  genirq: Prevent spurious detection for unconditionally polled interrupts

Showing 50 changed files

arch/alpha/include/asm/thread_info.h
... ... @@ -58,8 +58,6 @@
58 58 #define THREAD_SIZE_ORDER 1
59 59 #define THREAD_SIZE (2*PAGE_SIZE)
60 60  
61   -#define PREEMPT_ACTIVE 0x40000000
62   -
63 61 /*
64 62 * Thread information flags:
65 63 * - these are process state flags and used from assembly
arch/arc/include/asm/thread_info.h
... ... @@ -80,8 +80,6 @@
80 80  
81 81 #endif /* !__ASSEMBLY__ */
82 82  
83   -#define PREEMPT_ACTIVE 0x10000000
84   -
85 83 /*
86 84 * thread information flags
87 85 * - these are process state flags that various assembly files may need to
arch/arm/include/asm/thread_info.h
... ... @@ -141,12 +141,6 @@
141 141 #endif
142 142  
143 143 /*
144   - * We use bit 30 of the preempt_count to indicate that kernel
145   - * preemption is occurring. See <asm/hardirq.h>.
146   - */
147   -#define PREEMPT_ACTIVE 0x40000000
148   -
149   -/*
150 144 * thread information flags:
151 145 * TIF_SYSCALL_TRACE - syscall trace active
152 146 * TIF_SYSCAL_AUDIT - syscall auditing active
arch/arm64/include/asm/thread_info.h
... ... @@ -89,12 +89,6 @@
89 89 #endif
90 90  
91 91 /*
92   - * We use bit 30 of the preempt_count to indicate that kernel
93   - * preemption is occurring. See <asm/hardirq.h>.
94   - */
95   -#define PREEMPT_ACTIVE 0x40000000
96   -
97   -/*
98 92 * thread information flags:
99 93 * TIF_SYSCALL_TRACE - syscall trace active
100 94 * TIF_SIGPENDING - signal pending
arch/avr32/include/asm/thread_info.h
... ... @@ -66,8 +66,6 @@
66 66  
67 67 #endif /* !__ASSEMBLY__ */
68 68  
69   -#define PREEMPT_ACTIVE 0x40000000
70   -
71 69 /*
72 70 * Thread information flags
73 71 * - these are process state flags that various assembly files may need to access
arch/blackfin/include/asm/hardirq.h
... ... @@ -12,9 +12,6 @@
12 12 extern void ack_bad_irq(unsigned int irq);
13 13 #define ack_bad_irq ack_bad_irq
14 14  
15   -/* Define until common code gets sane defaults */
16   -#define HARDIRQ_BITS 9
17   -
18 15 #include <asm-generic/hardirq.h>
19 16  
20 17 #endif
arch/blackfin/include/asm/thread_info.h
... ... @@ -88,8 +88,6 @@
88 88 #define TI_CPU 12
89 89 #define TI_PREEMPT 16
90 90  
91   -#define PREEMPT_ACTIVE 0x4000000
92   -
93 91 /*
94 92 * thread information flag bit numbers
95 93 */
arch/c6x/include/asm/thread_info.h
... ... @@ -84,8 +84,6 @@
84 84 #define put_thread_info(ti) put_task_struct((ti)->task)
85 85 #endif /* __ASSEMBLY__ */
86 86  
87   -#define PREEMPT_ACTIVE 0x10000000
88   -
89 87 /*
90 88 * thread information flag bit numbers
91 89 * - pending work-to-be-done flags are in LSW
arch/cris/include/asm/hardirq.h
... ... @@ -2,18 +2,6 @@
2 2 #define __ASM_HARDIRQ_H
3 3  
4 4 #include <asm/irq.h>
5   -
6   -#define HARDIRQ_BITS 8
7   -
8   -/*
9   - * The hardirq mask has to be large enough to have
10   - * space for potentially all IRQ sources in the system
11   - * nesting on a single CPU:
12   - */
13   -#if (1 << HARDIRQ_BITS) < NR_IRQS
14   -# error HARDIRQ_BITS is too low!
15   -#endif
16   -
17 5 #include <asm-generic/hardirq.h>
18 6  
19 7 #endif /* __ASM_HARDIRQ_H */
arch/cris/include/asm/thread_info.h
... ... @@ -44,8 +44,6 @@
44 44  
45 45 #endif
46 46  
47   -#define PREEMPT_ACTIVE 0x10000000
48   -
49 47 /*
50 48 * macros/functions for gaining access to the thread information structure
51 49 */
arch/frv/include/asm/thread_info.h
... ... @@ -52,8 +52,6 @@
52 52  
53 53 #endif
54 54  
55   -#define PREEMPT_ACTIVE 0x10000000
56   -
57 55 /*
58 56 * macros/functions for gaining access to the thread information structure
59 57 */
arch/hexagon/include/asm/thread_info.h
... ... @@ -73,10 +73,6 @@
73 73  
74 74 #endif /* __ASSEMBLY__ */
75 75  
76   -/* looks like "linux/hardirq.h" uses this. */
77   -
78   -#define PREEMPT_ACTIVE 0x10000000
79   -
80 76 #ifndef __ASSEMBLY__
81 77  
82 78 #define INIT_THREAD_INFO(tsk) \
arch/ia64/include/asm/thread_info.h
... ... @@ -11,9 +11,6 @@
11 11 #include <asm/processor.h>
12 12 #include <asm/ptrace.h>
13 13  
14   -#define PREEMPT_ACTIVE_BIT 30
15   -#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
16   -
17 14 #ifndef __ASSEMBLY__
18 15  
19 16 /*
arch/ia64/kernel/entry.S
... ... @@ -1169,21 +1169,8 @@
1169 1169 .work_pending:
1170 1170 tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed?
1171 1171 (p6) br.cond.sptk.few .notify
1172   -#ifdef CONFIG_PREEMPT
1173   -(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
1174   - ;;
1175   -(pKStk) st4 [r20]=r21
1176   -#endif
1177   - SSM_PSR_I(p0, p6, r2) // enable interrupts
1178   - br.call.spnt.many rp=schedule
  1172 + br.call.spnt.many rp=preempt_schedule_irq
1179 1173 .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
1180   - RSM_PSR_I(p0, r2, r20) // disable interrupts
1181   - ;;
1182   -#ifdef CONFIG_PREEMPT
1183   -(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
1184   - ;;
1185   -(pKStk) st4 [r20]=r0 // preempt_count() <- 0
1186   -#endif
1187 1174 (pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
1188 1175 br.cond.sptk.many .work_processed_kernel
1189 1176  
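
The ia64 hunk above, like the m32r and sparc conversions further down (the sh hunk merely drops literals that are now unused, since that path already called preempt_schedule_irq), deletes an open-coded preemption sequence from the interrupt return path: set PREEMPT_ACTIVE, enable interrupts, call schedule(), disable interrupts, clear the count. preempt_schedule_irq() does all of that on behalf of every architecture. A simplified C sketch of that function (the real one in kernel/sched/core.c also has sanity checks and context-tracking hooks):

    /*
     * Called from the IRQ return path with interrupts disabled and a
     * zero preempt count; simplified sketch, not the verbatim source.
     */
    void preempt_schedule_irq(void)
    {
            do {
                    __preempt_count_add(PREEMPT_ACTIVE);  /* mark this as a preemption */
                    local_irq_enable();
                    __schedule();
                    local_irq_disable();
                    __preempt_count_sub(PREEMPT_ACTIVE);
            } while (need_resched());  /* another wakeup may have arrived meanwhile */
    }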
arch/m32r/include/asm/hardirq.h
... ... @@ -3,22 +3,6 @@
3 3 #define __ASM_HARDIRQ_H
4 4  
5 5 #include <asm/irq.h>
6   -
7   -#if NR_IRQS > 256
8   -#define HARDIRQ_BITS 9
9   -#else
10   -#define HARDIRQ_BITS 8
11   -#endif
12   -
13   -/*
14   - * The hardirq mask has to be large enough to have
15   - * space for potentially all IRQ sources in the system
16   - * nesting on a single CPU:
17   - */
18   -#if (1 << HARDIRQ_BITS) < NR_IRQS
19   -# error HARDIRQ_BITS is too low!
20   -#endif
21   -
22 6 #include <asm-generic/hardirq.h>
23 7  
24 8 #endif /* __ASM_HARDIRQ_H */
arch/m32r/include/asm/thread_info.h
... ... @@ -53,8 +53,6 @@
53 53  
54 54 #endif
55 55  
56   -#define PREEMPT_ACTIVE 0x10000000
57   -
58 56 #define THREAD_SIZE (PAGE_SIZE << 1)
59 57 #define THREAD_SIZE_ORDER 1
60 58 /*
arch/m32r/kernel/entry.S
... ... @@ -182,13 +182,7 @@
182 182 ld r4, PSW(sp) ; interrupts off (exception path) ?
183 183 and3 r4, r4, #0x4000
184 184 beqz r4, restore_all
185   - LDIMM (r4, PREEMPT_ACTIVE)
186   - st r4, @(TI_PRE_COUNT, r8)
187   - ENABLE_INTERRUPTS(r4)
188   - bl schedule
189   - ldi r4, #0
190   - st r4, @(TI_PRE_COUNT, r8)
191   - DISABLE_INTERRUPTS(r4)
  185 + bl preempt_schedule_irq
192 186 bra need_resched
193 187 #endif
194 188  
arch/m68k/include/asm/hardirq.h
... ... @@ -5,17 +5,6 @@
5 5 #include <linux/cache.h>
6 6 #include <asm/irq.h>
7 7  
8   -#define HARDIRQ_BITS 8
9   -
10   -/*
11   - * The hardirq mask has to be large enough to have
12   - * space for potentially all IRQ sources in the system
13   - * nesting on a single CPU:
14   - */
15   -#if (1 << HARDIRQ_BITS) < NR_IRQS
16   -# error HARDIRQ_BITS is too low!
17   -#endif
18   -
19 8 #ifdef CONFIG_MMU
20 9  
21 10 static inline void ack_bad_irq(unsigned int irq)
arch/m68k/include/asm/thread_info.h
... ... @@ -35,8 +35,6 @@
35 35 };
36 36 #endif /* __ASSEMBLY__ */
37 37  
38   -#define PREEMPT_ACTIVE 0x4000000
39   -
40 38 #define INIT_THREAD_INFO(tsk) \
41 39 { \
42 40 .task = &tsk, \
arch/m68k/kernel/entry.S
... ... @@ -45,7 +45,7 @@
45 45 .globl system_call, buserr, trap, resume
46 46 .globl sys_call_table
47 47 .globl __sys_fork, __sys_clone, __sys_vfork
48   -.globl ret_from_interrupt, bad_interrupt
  48 +.globl bad_interrupt
49 49 .globl auto_irqhandler_fixup
50 50 .globl user_irqvec_fixup
51 51  
... ... @@ -275,8 +275,6 @@
275 275 ENTRY(auto_inthandler)
276 276 SAVE_ALL_INT
277 277 GET_CURRENT(%d0)
278   - movel %d0,%a1
279   - addqb #1,%a1@(TINFO_PREEMPT+1)
280 278 | put exception # in d0
281 279 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
282 280 subw #VEC_SPUR,%d0
283 281  
284 282  
... ... @@ -286,32 +284,13 @@
286 284 auto_irqhandler_fixup = . + 2
287 285 jsr do_IRQ | process the IRQ
288 286 addql #8,%sp | pop parameters off stack
  287 + jra ret_from_exception
289 288  
290   -ret_from_interrupt:
291   - movel %curptr@(TASK_STACK),%a1
292   - subqb #1,%a1@(TINFO_PREEMPT+1)
293   - jeq ret_from_last_interrupt
294   -2: RESTORE_ALL
295   -
296   - ALIGN
297   -ret_from_last_interrupt:
298   - moveq #(~ALLOWINT>>8)&0xff,%d0
299   - andb %sp@(PT_OFF_SR),%d0
300   - jne 2b
301   -
302   - /* check if we need to do software interrupts */
303   - tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
304   - jeq .Lret_from_exception
305   - pea ret_from_exception
306   - jra do_softirq
307   -
308 289 /* Handler for user defined interrupt vectors */
309 290  
310 291 ENTRY(user_inthandler)
311 292 SAVE_ALL_INT
312 293 GET_CURRENT(%d0)
313   - movel %d0,%a1
314   - addqb #1,%a1@(TINFO_PREEMPT+1)
315 294 | put exception # in d0
316 295 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
317 296 user_irqvec_fixup = . + 2
318 297  
319 298  
320 299  
... ... @@ -321,29 +300,18 @@
321 300 movel %d0,%sp@- | put vector # on stack
322 301 jsr do_IRQ | process the IRQ
323 302 addql #8,%sp | pop parameters off stack
  303 + jra ret_from_exception
324 304  
325   - movel %curptr@(TASK_STACK),%a1
326   - subqb #1,%a1@(TINFO_PREEMPT+1)
327   - jeq ret_from_last_interrupt
328   - RESTORE_ALL
329   -
330 305 /* Handler for uninitialized and spurious interrupts */
331 306  
332 307 ENTRY(bad_inthandler)
333 308 SAVE_ALL_INT
334 309 GET_CURRENT(%d0)
335   - movel %d0,%a1
336   - addqb #1,%a1@(TINFO_PREEMPT+1)
337 310  
338 311 movel %sp,%sp@-
339 312 jsr handle_badint
340 313 addql #4,%sp
341   -
342   - movel %curptr@(TASK_STACK),%a1
343   - subqb #1,%a1@(TINFO_PREEMPT+1)
344   - jeq ret_from_last_interrupt
345   - RESTORE_ALL
346   -
  314 + jra ret_from_exception
347 315  
348 316 resume:
349 317 /*
arch/m68k/kernel/ints.c
... ... @@ -58,12 +58,6 @@
58 58 {
59 59 int i;
60 60  
61   - /* assembly irq entry code relies on this... */
62   - if (HARDIRQ_MASK != 0x00ff0000) {
63   - extern void hardirq_mask_is_broken(void);
64   - hardirq_mask_is_broken();
65   - }
66   -
67 61 for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++)
68 62 irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq);
69 63  
arch/m68k/platform/68000/entry.S
... ... @@ -27,7 +27,6 @@
27 27 .globl ret_from_exception
28 28 .globl ret_from_signal
29 29 .globl sys_call_table
30   -.globl ret_from_interrupt
31 30 .globl bad_interrupt
32 31 .globl inthandler1
33 32 .globl inthandler2
... ... @@ -137,7 +136,7 @@
137 136 movel #65,%sp@- /* put vector # on stack*/
138 137 jbsr process_int /* process the IRQ*/
139 138 3: addql #8,%sp /* pop parameters off stack*/
140   - bra ret_from_interrupt
  139 + bra ret_from_exception
141 140  
142 141 inthandler2:
143 142 SAVE_ALL_INT
... ... @@ -148,7 +147,7 @@
148 147 movel #66,%sp@- /* put vector # on stack*/
149 148 jbsr process_int /* process the IRQ*/
150 149 3: addql #8,%sp /* pop parameters off stack*/
151   - bra ret_from_interrupt
  150 + bra ret_from_exception
152 151  
153 152 inthandler3:
154 153 SAVE_ALL_INT
... ... @@ -159,7 +158,7 @@
159 158 movel #67,%sp@- /* put vector # on stack*/
160 159 jbsr process_int /* process the IRQ*/
161 160 3: addql #8,%sp /* pop parameters off stack*/
162   - bra ret_from_interrupt
  161 + bra ret_from_exception
163 162  
164 163 inthandler4:
165 164 SAVE_ALL_INT
... ... @@ -170,7 +169,7 @@
170 169 movel #68,%sp@- /* put vector # on stack*/
171 170 jbsr process_int /* process the IRQ*/
172 171 3: addql #8,%sp /* pop parameters off stack*/
173   - bra ret_from_interrupt
  172 + bra ret_from_exception
174 173  
175 174 inthandler5:
176 175 SAVE_ALL_INT
... ... @@ -181,7 +180,7 @@
181 180 movel #69,%sp@- /* put vector # on stack*/
182 181 jbsr process_int /* process the IRQ*/
183 182 3: addql #8,%sp /* pop parameters off stack*/
184   - bra ret_from_interrupt
  183 + bra ret_from_exception
185 184  
186 185 inthandler6:
187 186 SAVE_ALL_INT
... ... @@ -192,7 +191,7 @@
192 191 movel #70,%sp@- /* put vector # on stack*/
193 192 jbsr process_int /* process the IRQ*/
194 193 3: addql #8,%sp /* pop parameters off stack*/
195   - bra ret_from_interrupt
  194 + bra ret_from_exception
196 195  
197 196 inthandler7:
198 197 SAVE_ALL_INT
... ... @@ -203,7 +202,7 @@
203 202 movel #71,%sp@- /* put vector # on stack*/
204 203 jbsr process_int /* process the IRQ*/
205 204 3: addql #8,%sp /* pop parameters off stack*/
206   - bra ret_from_interrupt
  205 + bra ret_from_exception
207 206  
208 207 inthandler:
209 208 SAVE_ALL_INT
... ... @@ -214,23 +213,7 @@
214 213 movel %d0,%sp@- /* put vector # on stack*/
215 214 jbsr process_int /* process the IRQ*/
216 215 3: addql #8,%sp /* pop parameters off stack*/
217   - bra ret_from_interrupt
218   -
219   -ret_from_interrupt:
220   - jeq 1f
221   -2:
222   - RESTORE_ALL
223   -1:
224   - moveb %sp@(PT_OFF_SR), %d0
225   - and #7, %d0
226   - jhi 2b
227   -
228   - /* check if we need to do software interrupts */
229   - jeq ret_from_exception
230   -
231   - pea ret_from_exception
232   - jra do_softirq
233   -
  216 + bra ret_from_exception
234 217  
235 218 /*
236 219 * Handler for uninitialized and spurious interrupts.
arch/m68k/platform/68360/entry.S
... ... @@ -29,7 +29,6 @@
29 29 .globl ret_from_exception
30 30 .globl ret_from_signal
31 31 .globl sys_call_table
32   -.globl ret_from_interrupt
33 32 .globl bad_interrupt
34 33 .globl inthandler
35 34  
... ... @@ -132,26 +131,9 @@
132 131  
133 132 movel %sp,%sp@-
134 133 movel %d0,%sp@- /* put vector # on stack*/
135   - jbsr do_IRQ /* process the IRQ*/
136   -3: addql #8,%sp /* pop parameters off stack*/
137   - bra ret_from_interrupt
138   -
139   -ret_from_interrupt:
140   - jeq 1f
141   -2:
142   - RESTORE_ALL
143   -1:
144   - moveb %sp@(PT_OFF_SR), %d0
145   - and #7, %d0
146   - jhi 2b
147   - /* check if we need to do software interrupts */
148   -
149   - movel irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0
150   - jeq ret_from_exception
151   -
152   - pea ret_from_exception
153   - jra do_softirq
154   -
  134 + jbsr do_IRQ /* process the IRQ */
  135 + addql #8,%sp /* pop parameters off stack*/
  136 + jra ret_from_exception
155 137  
156 138 /*
157 139 * Handler for uninitialized and spurious interrupts.
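
With the hunks above, the m68k interrupt entry code no longer bumps the hardirq byte of the preempt count or checks for pending softirqs in assembly; every handler now tails straight into ret_from_exception, and the bookkeeping is left to the generic irq_enter()/irq_exit() pair that do_IRQ() wraps around the handler. A rough sketch of those helpers (heavily simplified from kernel/softirq.c; tick, RCU, tracing and time accounting omitted):

    void irq_enter(void)
    {
            __irq_enter();                  /* preempt_count += HARDIRQ_OFFSET */
    }

    void irq_exit(void)
    {
            preempt_count_sub(HARDIRQ_OFFSET);
            /* replaces the hand-rolled CPUSTAT_SOFTIRQ_PENDING check in entry.S */
            if (!in_interrupt() && local_softirq_pending())
                    invoke_softirq();
    }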
arch/metag/include/asm/thread_info.h
... ... @@ -46,8 +46,6 @@
46 46  
47 47 #endif
48 48  
49   -#define PREEMPT_ACTIVE 0x10000000
50   -
51 49 #ifdef CONFIG_4KSTACKS
52 50 #define THREAD_SHIFT 12
53 51 #else
arch/microblaze/include/asm/thread_info.h
... ... @@ -106,8 +106,6 @@
106 106 /* thread information allocation */
107 107 #endif /* __ASSEMBLY__ */
108 108  
109   -#define PREEMPT_ACTIVE 0x10000000
110   -
111 109 /*
112 110 * thread information flags
113 111 * - these are process state flags that various assembly files may
arch/mips/include/asm/thread_info.h
... ... @@ -92,8 +92,6 @@
92 92  
93 93 #define STACK_WARN (THREAD_SIZE / 8)
94 94  
95   -#define PREEMPT_ACTIVE 0x10000000
96   -
97 95 /*
98 96 * thread information flags
99 97 * - these are process state flags that various assembly files may need to
arch/mn10300/include/asm/thread_info.h
... ... @@ -16,8 +16,6 @@
16 16  
17 17 #include <asm/page.h>
18 18  
19   -#define PREEMPT_ACTIVE 0x10000000
20   -
21 19 #ifdef CONFIG_4KSTACKS
22 20 #define THREAD_SIZE (4096)
23 21 #define THREAD_SIZE_ORDER (0)
arch/parisc/include/asm/thread_info.h
... ... @@ -46,9 +46,6 @@
46 46 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
47 47 #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
48 48  
49   -#define PREEMPT_ACTIVE_BIT 28
50   -#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
51   -
52 49 /*
53 50 * thread information flags
54 51 */
arch/powerpc/include/asm/thread_info.h
... ... @@ -82,8 +82,6 @@
82 82  
83 83 #endif /* __ASSEMBLY__ */
84 84  
85   -#define PREEMPT_ACTIVE 0x10000000
86   -
87 85 /*
88 86 * thread information flag bit numbers
89 87 */
arch/s390/include/asm/hardirq.h
... ... @@ -18,8 +18,6 @@
18 18 #define __ARCH_HAS_DO_SOFTIRQ
19 19 #define __ARCH_IRQ_EXIT_IRQS_DISABLED
20 20  
21   -#define HARDIRQ_BITS 8
22   -
23 21 static inline void ack_bad_irq(unsigned int irq)
24 22 {
25 23 printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
arch/s390/include/asm/thread_info.h
... ... @@ -111,7 +111,5 @@
111 111 #define is_32bit_task() (1)
112 112 #endif
113 113  
114   -#define PREEMPT_ACTIVE 0x4000000
115   -
116 114 #endif /* _ASM_THREAD_INFO_H */
arch/score/include/asm/thread_info.h
... ... @@ -72,8 +72,6 @@
72 72  
73 73 #endif /* !__ASSEMBLY__ */
74 74  
75   -#define PREEMPT_ACTIVE 0x10000000
76   -
77 75 /*
78 76 * thread information flags
79 77 * - these are process state flags that various assembly files may need to
arch/sh/include/asm/thread_info.h
... ... @@ -41,8 +41,6 @@
41 41  
42 42 #endif
43 43  
44   -#define PREEMPT_ACTIVE 0x10000000
45   -
46 44 #if defined(CONFIG_4KSTACKS)
47 45 #define THREAD_SHIFT 12
48 46 #else
arch/sh/kernel/entry-common.S
... ... @@ -108,7 +108,7 @@
108 108 and #(0xf0>>1), r0 ! interrupts off (exception path)?
109 109 cmp/eq #(0xf0>>1), r0
110 110 bt noresched
111   - mov.l 3f, r0
  111 + mov.l 1f, r0
112 112 jsr @r0 ! call preempt_schedule_irq
113 113 nop
114 114 bra need_resched
... ... @@ -119,9 +119,7 @@
119 119 nop
120 120  
121 121 .align 2
122   -1: .long PREEMPT_ACTIVE
123   -2: .long schedule
124   -3: .long preempt_schedule_irq
  122 +1: .long preempt_schedule_irq
125 123 #endif
126 124  
127 125 ENTRY(resume_userspace)
arch/sparc/include/asm/hardirq_32.h
... ... @@ -7,7 +7,6 @@
7 7 #ifndef __SPARC_HARDIRQ_H
8 8 #define __SPARC_HARDIRQ_H
9 9  
10   -#define HARDIRQ_BITS 8
11 10 #include <asm-generic/hardirq.h>
12 11  
13 12 #endif /* __SPARC_HARDIRQ_H */
arch/sparc/include/asm/hardirq_64.h
... ... @@ -14,7 +14,5 @@
14 14  
15 15 void ack_bad_irq(unsigned int irq);
16 16  
17   -#define HARDIRQ_BITS 8
18   -
19 17 #endif /* !(__SPARC64_HARDIRQ_H) */
arch/sparc/include/asm/thread_info_32.h
... ... @@ -105,8 +105,6 @@
105 105 #define TI_W_SAVED 0x250
106 106 /* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */
107 107  
108   -#define PREEMPT_ACTIVE 0x4000000
109   -
110 108 /*
111 109 * thread information flag bit numbers
112 110 */
arch/sparc/include/asm/thread_info_64.h
... ... @@ -111,8 +111,6 @@
111 111 #define THREAD_SHIFT PAGE_SHIFT
112 112 #endif /* PAGE_SHIFT == 13 */
113 113  
114   -#define PREEMPT_ACTIVE 0x10000000
115   -
116 114 /*
117 115 * macros/functions for gaining access to the thread information structure
118 116 */
arch/sparc/kernel/rtrap_64.S
... ... @@ -312,12 +312,10 @@
312 312 nop
313 313 cmp %l4, 0
314 314 bne,pn %xcc, kern_fpucheck
315   - sethi %hi(PREEMPT_ACTIVE), %l6
316   - stw %l6, [%g6 + TI_PRE_COUNT]
317   - call schedule
318 315 nop
  316 + call preempt_schedule_irq
  317 + nop
319 318 ba,pt %xcc, rtrap
320   - stw %g0, [%g6 + TI_PRE_COUNT]
321 319 #endif
322 320 kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
323 321 brz,pt %l5, rt_continue
arch/tile/include/asm/hardirq.h
... ... @@ -42,7 +42,5 @@
42 42  
43 43 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
44 44  
45   -#define HARDIRQ_BITS 8
46   -
47 45 #endif /* _ASM_TILE_HARDIRQ_H */
arch/tile/include/asm/thread_info.h
... ... @@ -113,8 +113,6 @@
113 113  
114 114 #endif /* !__ASSEMBLY__ */
115 115  
116   -#define PREEMPT_ACTIVE 0x10000000
117   -
118 116 /*
119 117 * Thread information flags that various assembly files may need to access.
120 118 * Keep flags accessed frequently in low bits, particular since it makes
arch/um/include/asm/thread_info.h
... ... @@ -60,8 +60,6 @@
60 60  
61 61 #endif
62 62  
63   -#define PREEMPT_ACTIVE 0x10000000
64   -
65 63 #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
66 64 #define TIF_SIGPENDING 1 /* signal pending */
67 65 #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
arch/unicore32/include/asm/thread_info.h
... ... @@ -118,12 +118,6 @@
118 118 #endif
119 119  
120 120 /*
121   - * We use bit 30 of the preempt_count to indicate that kernel
122   - * preemption is occurring. See <asm/hardirq.h>.
123   - */
124   -#define PREEMPT_ACTIVE 0x40000000
125   -
126   -/*
127 121 * thread information flags:
128 122 * TIF_SYSCALL_TRACE - syscall trace active
129 123 * TIF_SIGPENDING - signal pending
arch/x86/include/asm/thread_info.h
... ... @@ -153,8 +153,6 @@
153 153 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
154 154 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
155 155  
156   -#define PREEMPT_ACTIVE 0x10000000
157   -
158 156 #ifdef CONFIG_X86_32
159 157  
160 158 #define STACK_WARN (THREAD_SIZE/8)
arch/xtensa/include/asm/thread_info.h
... ... @@ -76,8 +76,6 @@
76 76  
77 77 #endif
78 78  
79   -#define PREEMPT_ACTIVE 0x10000000
80   -
81 79 /*
82 80 * macros/functions for gaining access to the thread information structure
83 81 */
include/linux/irq.h
... ... @@ -70,6 +70,9 @@
70 70 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
71 71 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
72 72 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
  73 + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
  74 + * it from the spurious interrupt detection
  75 + * mechanism and from core side polling.
73 76 */
74 77 enum {
75 78 IRQ_TYPE_NONE = 0x00000000,
76 79  
... ... @@ -94,12 +97,14 @@
94 97 IRQ_NESTED_THREAD = (1 << 15),
95 98 IRQ_NOTHREAD = (1 << 16),
96 99 IRQ_PER_CPU_DEVID = (1 << 17),
  100 + IRQ_IS_POLLED = (1 << 18),
97 101 };
98 102  
99 103 #define IRQF_MODIFY_MASK \
100 104 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
101 105 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
102   - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
  106 + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
  107 + IRQ_IS_POLLED)
103 108  
104 109 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
105 110  
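
Because IRQ_IS_POLLED is part of IRQF_MODIFY_MASK, a driver can set it through the usual status helpers before requesting the line. A hedged sketch of what that might look like in a probe routine (the IRQ number, handler and names here are hypothetical, not taken from this commit):

    /*
     * This line is only ever serviced by being polled from another
     * interrupt, so keep it out of spurious detection and core polling.
     */
    irq_set_status_flags(polled_irq, IRQ_IS_POLLED);

    ret = request_irq(polled_irq, example_polled_handler, 0,
                      "example-polled", dev);
    if (ret)
            return ret;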
include/linux/preempt_mask.h
... ... @@ -11,36 +11,23 @@
11 11 * - bits 0-7 are the preemption count (max preemption depth: 256)
12 12 * - bits 8-15 are the softirq count (max # of softirqs: 256)
13 13 *
14   - * The hardirq count can in theory reach the same as NR_IRQS.
15   - * In reality, the number of nested IRQS is limited to the stack
16   - * size as well. For archs with over 1000 IRQS it is not practical
17   - * to expect that they will all nest. We give a max of 10 bits for
18   - * hardirq nesting. An arch may choose to give less than 10 bits.
19   - * m68k expects it to be 8.
  14 + * The hardirq count could in theory be the same as the number of
  15 + * interrupts in the system, but we run all interrupt handlers with
  16 + * interrupts disabled, so we cannot have nesting interrupts. Though
  17 + * there are a few palaeontologic drivers which reenable interrupts in
  18 + * the handler, so we need more than one bit here.
20 19 *
21   - * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
22   - * - bit 26 is the NMI_MASK
23   - * - bit 27 is the PREEMPT_ACTIVE flag
24   - *
25   - * PREEMPT_MASK: 0x000000ff
26   - * SOFTIRQ_MASK: 0x0000ff00
27   - * HARDIRQ_MASK: 0x03ff0000
28   - * NMI_MASK: 0x04000000
  20 + * PREEMPT_MASK: 0x000000ff
  21 + * SOFTIRQ_MASK: 0x0000ff00
  22 + * HARDIRQ_MASK: 0x000f0000
  23 + * NMI_MASK: 0x00100000
  24 + * PREEMPT_ACTIVE: 0x00200000
29 25 */
30 26 #define PREEMPT_BITS 8
31 27 #define SOFTIRQ_BITS 8
  28 +#define HARDIRQ_BITS 4
32 29 #define NMI_BITS 1
33 30  
34   -#define MAX_HARDIRQ_BITS 10
35   -
36   -#ifndef HARDIRQ_BITS
37   -# define HARDIRQ_BITS MAX_HARDIRQ_BITS
38   -#endif
39   -
40   -#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
41   -#error HARDIRQ_BITS too high!
42   -#endif
43   -
44 31 #define PREEMPT_SHIFT 0
45 32 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
46 33 #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
47 34  
... ... @@ -60,15 +47,9 @@
60 47  
61 48 #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
62 49  
63   -#ifndef PREEMPT_ACTIVE
64 50 #define PREEMPT_ACTIVE_BITS 1
65 51 #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
66 52 #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
67   -#endif
68   -
69   -#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
70   -#error PREEMPT_ACTIVE is too low!
71   -#endif
72 53  
73 54 #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
74 55 #define softirq_count() (preempt_count() & SOFTIRQ_MASK)
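
With the per-architecture overrides gone, this layout is now the same everywhere, and the shifts above work out exactly to the masks quoted in the new comment. A small standalone check (plain userspace C with the bit counts copied from this hunk, not kernel code):

    #include <stdio.h>

    #define PREEMPT_BITS            8
    #define SOFTIRQ_BITS            8
    #define HARDIRQ_BITS            4
    #define NMI_BITS                1

    #define PREEMPT_SHIFT           0
    #define SOFTIRQ_SHIFT           (PREEMPT_SHIFT + PREEMPT_BITS)
    #define HARDIRQ_SHIFT           (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
    #define NMI_SHIFT               (HARDIRQ_SHIFT + HARDIRQ_BITS)

    #define __IRQ_MASK(x)           ((1UL << (x)) - 1)

    #define PREEMPT_ACTIVE_SHIFT    (NMI_SHIFT + NMI_BITS)
    #define PREEMPT_ACTIVE          (__IRQ_MASK(1) << PREEMPT_ACTIVE_SHIFT)

    int main(void)
    {
            /* Prints 0x000f0000, 0x00100000 and 0x00200000, matching the comment. */
            printf("HARDIRQ_MASK   0x%08lx\n", __IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT);
            printf("NMI_MASK       0x%08lx\n", __IRQ_MASK(NMI_BITS) << NMI_SHIFT);
            printf("PREEMPT_ACTIVE 0x%08lx\n", PREEMPT_ACTIVE);
            return 0;
    }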
include/linux/sched.h
... ... @@ -22,7 +22,7 @@
22 22 #include <linux/errno.h>
23 23 #include <linux/nodemask.h>
24 24 #include <linux/mm_types.h>
25   -#include <linux/preempt.h>
  25 +#include <linux/preempt_mask.h>
26 26  
27 27 #include <asm/page.h>
28 28 #include <asm/ptrace.h>
kernel/irq/settings.h
... ... @@ -14,6 +14,7 @@
14 14 _IRQ_NO_BALANCING = IRQ_NO_BALANCING,
15 15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
16 16 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
  17 + _IRQ_IS_POLLED = IRQ_IS_POLLED,
17 18 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
18 19 };
19 20  
... ... @@ -26,6 +27,7 @@
26 27 #define IRQ_NOAUTOEN GOT_YOU_MORON
27 28 #define IRQ_NESTED_THREAD GOT_YOU_MORON
28 29 #define IRQ_PER_CPU_DEVID GOT_YOU_MORON
  30 +#define IRQ_IS_POLLED GOT_YOU_MORON
29 31 #undef IRQF_MODIFY_MASK
30 32 #define IRQF_MODIFY_MASK GOT_YOU_MORON
31 33  
... ... @@ -146,5 +148,10 @@
146 148 static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
147 149 {
148 150 return desc->status_use_accessors & _IRQ_NESTED_THREAD;
  151 +}
  152 +
  153 +static inline bool irq_settings_is_polled(struct irq_desc *desc)
  154 +{
  155 + return desc->status_use_accessors & _IRQ_IS_POLLED;
149 156 }
kernel/irq/spurious.c
... ... @@ -67,8 +67,13 @@
67 67  
68 68 raw_spin_lock(&desc->lock);
69 69  
70   - /* PER_CPU and nested thread interrupts are never polled */
71   - if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
  70 + /*
  71 + * PER_CPU, nested thread interrupts and interrupts explicitely
  72 + * marked polled are excluded from polling.
  73 + */
  74 + if (irq_settings_is_per_cpu(desc) ||
  75 + irq_settings_is_nested_thread(desc) ||
  76 + irq_settings_is_polled(desc))
72 77 goto out;
73 78  
74 79 /*
... ... @@ -268,7 +273,8 @@
268 273 void note_interrupt(unsigned int irq, struct irq_desc *desc,
269 274 irqreturn_t action_ret)
270 275 {
271   - if (desc->istate & IRQS_POLL_INPROGRESS)
  276 + if (desc->istate & IRQS_POLL_INPROGRESS ||
  277 + irq_settings_is_polled(desc))
272 278 return;
273 279  
274 280 /* we get here again via the threaded handler */
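
For context on why polled lines need this exclusion: note_interrupt() keeps per-descriptor statistics, and once the overwhelming majority of recent interrupts on a line were reported unhandled it prints the familiar "nobody cared" message and disables the line. A handler that exists only to be polled from another interrupt legitimately returns IRQ_NONE almost every time it is invoked, so without the early return above it would eventually be shut down. A simplified sketch of the existing accounting that is now bypassed (thresholds as used in kernel/irq/spurious.c; not new code from this commit):

    if (action_ret == IRQ_NONE)
            desc->irqs_unhandled++;

    if (++desc->irq_count >= 100000) {
            /* roughly: 99,900 of the last 100,000 went unhandled */
            if (desc->irqs_unhandled > 99900)
                    __report_bad_irq(irq, desc, action_ret);  /* then the line is disabled */
            desc->irq_count = 0;
            desc->irqs_unhandled = 0;
    }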