Commit a787870924dbd6f321661e06d4ec1c7a408c9ccf

Authored by Peter Zijlstra
Committed by Ingo Molnar
1 parent f27dde8dee

sched, arch: Create asm/preempt.h

In order to prepare for per-arch implementations of preempt_count, move
the required bits into an asm-generic header and use this for all
archs.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-h5j0c1r3e3fk015m30h8f1zx@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 32 changed files with 85 additions and 48 deletions Side-by-side Diff

arch/alpha/include/asm/Kbuild
... ... @@ -3,4 +3,5 @@
3 3  
4 4 generic-y += exec.h
5 5 generic-y += trace_clock.h
  6 +generic-y += preempt.h
arch/arc/include/asm/Kbuild
... ... @@ -46,4 +46,5 @@
46 46 generic-y += user.h
47 47 generic-y += vga.h
48 48 generic-y += xor.h
  49 +generic-y += preempt.h
arch/arm/include/asm/Kbuild
... ... @@ -33,4 +33,5 @@
33 33 generic-y += trace_clock.h
34 34 generic-y += types.h
35 35 generic-y += unaligned.h
  36 +generic-y += preempt.h
arch/arm64/include/asm/Kbuild
... ... @@ -50,4 +50,5 @@
50 50 generic-y += user.h
51 51 generic-y += vga.h
52 52 generic-y += xor.h
  53 +generic-y += preempt.h
arch/avr32/include/asm/Kbuild
... ... @@ -3,4 +3,5 @@
3 3 generic-y += exec.h
4 4 generic-y += trace_clock.h
5 5 generic-y += param.h
  6 +generic-y += preempt.h
arch/blackfin/include/asm/Kbuild
... ... @@ -44,4 +44,5 @@
44 44 generic-y += unaligned.h
45 45 generic-y += user.h
46 46 generic-y += xor.h
  47 +generic-y += preempt.h
arch/c6x/include/asm/Kbuild
... ... @@ -56,4 +56,5 @@
56 56 generic-y += user.h
57 57 generic-y += vga.h
58 58 generic-y += xor.h
  59 +generic-y += preempt.h
arch/cris/include/asm/Kbuild
... ... @@ -11,4 +11,5 @@
11 11 generic-y += trace_clock.h
12 12 generic-y += vga.h
13 13 generic-y += xor.h
  14 +generic-y += preempt.h
arch/frv/include/asm/Kbuild
... ... @@ -2,4 +2,5 @@
2 2 generic-y += clkdev.h
3 3 generic-y += exec.h
4 4 generic-y += trace_clock.h
  5 +generic-y += preempt.h
arch/h8300/include/asm/Kbuild
... ... @@ -6,4 +6,5 @@
6 6 generic-y += module.h
7 7 generic-y += trace_clock.h
8 8 generic-y += xor.h
  9 +generic-y += preempt.h
arch/hexagon/include/asm/Kbuild
... ... @@ -53,4 +53,5 @@
53 53 generic-y += ucontext.h
54 54 generic-y += unaligned.h
55 55 generic-y += xor.h
  56 +generic-y += preempt.h
arch/ia64/include/asm/Kbuild
... ... @@ -3,5 +3,6 @@
3 3 generic-y += exec.h
4 4 generic-y += kvm_para.h
5 5 generic-y += trace_clock.h
  6 +generic-y += preempt.h
6 7 generic-y += vtime.h
arch/m32r/include/asm/Kbuild
... ... @@ -3,4 +3,5 @@
3 3 generic-y += exec.h
4 4 generic-y += module.h
5 5 generic-y += trace_clock.h
  6 +generic-y += preempt.h
arch/m68k/include/asm/Kbuild
... ... @@ -31,4 +31,5 @@
31 31 generic-y += types.h
32 32 generic-y += word-at-a-time.h
33 33 generic-y += xor.h
  34 +generic-y += preempt.h
arch/metag/include/asm/Kbuild
... ... @@ -52,4 +52,5 @@
52 52 generic-y += user.h
53 53 generic-y += vga.h
54 54 generic-y += xor.h
  55 +generic-y += preempt.h
arch/microblaze/include/asm/Kbuild
... ... @@ -3,4 +3,5 @@
3 3 generic-y += exec.h
4 4 generic-y += trace_clock.h
5 5 generic-y += syscalls.h
  6 +generic-y += preempt.h
arch/mips/include/asm/Kbuild
... ... @@ -11,6 +11,7 @@
11 11 generic-y += segment.h
12 12 generic-y += serial.h
13 13 generic-y += trace_clock.h
  14 +generic-y += preempt.h
14 15 generic-y += ucontext.h
15 16 generic-y += xor.h
arch/mn10300/include/asm/Kbuild
... ... @@ -2,4 +2,5 @@
2 2 generic-y += clkdev.h
3 3 generic-y += exec.h
4 4 generic-y += trace_clock.h
  5 +generic-y += preempt.h
arch/openrisc/include/asm/Kbuild
... ... @@ -67,4 +67,5 @@
67 67 generic-y += user.h
68 68 generic-y += word-at-a-time.h
69 69 generic-y += xor.h
  70 +generic-y += preempt.h
arch/parisc/include/asm/Kbuild
... ... @@ -4,4 +4,5 @@
4 4 div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
5 5 poll.h xor.h clkdev.h exec.h
6 6 generic-y += trace_clock.h
  7 +generic-y += preempt.h
arch/powerpc/include/asm/Kbuild
... ... @@ -2,5 +2,6 @@
2 2 generic-y += clkdev.h
3 3 generic-y += rwsem.h
4 4 generic-y += trace_clock.h
  5 +generic-y += preempt.h
5 6 generic-y += vtime.h
arch/s390/include/asm/Kbuild
... ... @@ -2,4 +2,5 @@
2 2  
3 3 generic-y += clkdev.h
4 4 generic-y += trace_clock.h
  5 +generic-y += preempt.h
arch/score/include/asm/Kbuild
... ... @@ -4,4 +4,5 @@
4 4 generic-y += clkdev.h
5 5 generic-y += trace_clock.h
6 6 generic-y += xor.h
  7 +generic-y += preempt.h
arch/sh/include/asm/Kbuild
... ... @@ -34,4 +34,5 @@
34 34 generic-y += trace_clock.h
35 35 generic-y += ucontext.h
36 36 generic-y += xor.h
  37 +generic-y += preempt.h
arch/sparc/include/asm/Kbuild
... ... @@ -16,4 +16,5 @@
16 16 generic-y += trace_clock.h
17 17 generic-y += types.h
18 18 generic-y += word-at-a-time.h
  19 +generic-y += preempt.h
arch/tile/include/asm/Kbuild
... ... @@ -38,4 +38,5 @@
38 38 generic-y += trace_clock.h
39 39 generic-y += types.h
40 40 generic-y += xor.h
  41 +generic-y += preempt.h
arch/um/include/asm/Kbuild
... ... @@ -3,4 +3,5 @@
3 3 generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
4 4 generic-y += switch_to.h clkdev.h
5 5 generic-y += trace_clock.h
  6 +generic-y += preempt.h
arch/unicore32/include/asm/Kbuild
... ... @@ -60,4 +60,5 @@
60 60 generic-y += user.h
61 61 generic-y += vga.h
62 62 generic-y += xor.h
  63 +generic-y += preempt.h
arch/x86/include/asm/Kbuild
... ... @@ -5,4 +5,5 @@
5 5 genhdr-y += unistd_x32.h
6 6  
7 7 generic-y += clkdev.h
  8 +generic-y += preempt.h
arch/xtensa/include/asm/Kbuild
... ... @@ -28,4 +28,5 @@
28 28 generic-y += topology.h
29 29 generic-y += trace_clock.h
30 30 generic-y += xor.h
  31 +generic-y += preempt.h
include/asm-generic/preempt.h
  1 +#ifndef __ASM_PREEMPT_H
  2 +#define __ASM_PREEMPT_H
  3 +
  4 +#include <linux/thread_info.h>
  5 +
  6 +/*
  7 + * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
  8 + * that think a non-zero value indicates we cannot preempt.
  9 + */
  10 +static __always_inline int preempt_count(void)
  11 +{
  12 + return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
  13 +}
  14 +
  15 +static __always_inline int *preempt_count_ptr(void)
  16 +{
  17 + return &current_thread_info()->preempt_count;
  18 +}
  19 +
  20 +/*
  21 + * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
  22 + * alternative is losing a reschedule. Better schedule too often -- also this
  23 + * should be a very rare operation.
  24 + */
  25 +static __always_inline void preempt_count_set(int pc)
  26 +{
  27 + *preempt_count_ptr() = pc;
  28 +}
  29 +
  30 +/*
  31 + * We fold the NEED_RESCHED bit into the preempt count such that
  32 + * preempt_enable() can decrement and test for needing to reschedule with a
  33 + * single instruction.
  34 + *
  35 + * We invert the actual bit, so that when the decrement hits 0 we know we both
  36 + * need to resched (the bit is cleared) and can resched (no preempt count).
  37 + */
  38 +
  39 +static __always_inline void set_preempt_need_resched(void)
  40 +{
  41 + *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
  42 +}
  43 +
  44 +static __always_inline void clear_preempt_need_resched(void)
  45 +{
  46 + *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
  47 +}
  48 +
  49 +static __always_inline bool test_preempt_need_resched(void)
  50 +{
  51 + return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
  52 +}
  53 +
  54 +#endif /* __ASM_PREEMPT_H */
include/linux/preempt.h
... ... @@ -6,7 +6,6 @@
6 6 * preempt_count (used for kernel preemption, interrupt count, etc.)
7 7 */
8 8  
9   -#include <linux/thread_info.h>
10 9 #include <linux/linkage.h>
11 10 #include <linux/list.h>
12 11  
... ... @@ -16,53 +15,7 @@
16 15 */
17 16 #define PREEMPT_NEED_RESCHED 0x80000000
18 17  
19   -/*
20   - * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
21   - * that think a non-zero value indicates we cannot preempt.
22   - */
23   -static __always_inline int preempt_count(void)
24   -{
25   - return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
26   -}
27   -
28   -static __always_inline int *preempt_count_ptr(void)
29   -{
30   - return &current_thread_info()->preempt_count;
31   -}
32   -
33   -/*
34   - * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
35   - * alternative is losing a reschedule. Better schedule too often -- also this
36   - * should be a very rare operation.
37   - */
38   -static __always_inline void preempt_count_set(int pc)
39   -{
40   - *preempt_count_ptr() = pc;
41   -}
42   -
43   -/*
44   - * We fold the NEED_RESCHED bit into the preempt count such that
45   - * preempt_enable() can decrement and test for needing to reschedule with a
46   - * single instruction.
47   - *
48   - * We invert the actual bit, so that when the decrement hits 0 we know we both
49   - * need to resched (the bit is cleared) and can resched (no preempt count).
50   - */
51   -
52   -static __always_inline void set_preempt_need_resched(void)
53   -{
54   - *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
55   -}
56   -
57   -static __always_inline void clear_preempt_need_resched(void)
58   -{
59   - *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
60   -}
61   -
62   -static __always_inline bool test_preempt_need_resched(void)
63   -{
64   - return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
65   -}
  18 +#include <asm/preempt.h>
66 19  
67 20 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
68 21 extern void add_preempt_count(int val);