Commit a787870924dbd6f321661e06d4ec1c7a408c9ccf
Committed by
Ingo Molnar
1 parent
f27dde8dee
Exists in
master
and in
16 other branches
sched, arch: Create asm/preempt.h
In order to prepare to per-arch implementations of preempt_count move the required bits into an asm-generic header and use this for all archs. Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/n/tip-h5j0c1r3e3fk015m30h8f1zx@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Showing 32 changed files with 85 additions and 48 deletions Side-by-side Diff
- arch/alpha/include/asm/Kbuild
- arch/arc/include/asm/Kbuild
- arch/arm/include/asm/Kbuild
- arch/arm64/include/asm/Kbuild
- arch/avr32/include/asm/Kbuild
- arch/blackfin/include/asm/Kbuild
- arch/c6x/include/asm/Kbuild
- arch/cris/include/asm/Kbuild
- arch/frv/include/asm/Kbuild
- arch/h8300/include/asm/Kbuild
- arch/hexagon/include/asm/Kbuild
- arch/ia64/include/asm/Kbuild
- arch/m32r/include/asm/Kbuild
- arch/m68k/include/asm/Kbuild
- arch/metag/include/asm/Kbuild
- arch/microblaze/include/asm/Kbuild
- arch/mips/include/asm/Kbuild
- arch/mn10300/include/asm/Kbuild
- arch/openrisc/include/asm/Kbuild
- arch/parisc/include/asm/Kbuild
- arch/powerpc/include/asm/Kbuild
- arch/s390/include/asm/Kbuild
- arch/score/include/asm/Kbuild
- arch/sh/include/asm/Kbuild
- arch/sparc/include/asm/Kbuild
- arch/tile/include/asm/Kbuild
- arch/um/include/asm/Kbuild
- arch/unicore32/include/asm/Kbuild
- arch/x86/include/asm/Kbuild
- arch/xtensa/include/asm/Kbuild
- include/asm-generic/preempt.h
- include/linux/preempt.h
arch/alpha/include/asm/Kbuild
arch/arc/include/asm/Kbuild
arch/arm/include/asm/Kbuild
arch/arm64/include/asm/Kbuild
arch/avr32/include/asm/Kbuild
arch/blackfin/include/asm/Kbuild
arch/c6x/include/asm/Kbuild
arch/cris/include/asm/Kbuild
arch/frv/include/asm/Kbuild
arch/h8300/include/asm/Kbuild
arch/hexagon/include/asm/Kbuild
arch/ia64/include/asm/Kbuild
arch/m32r/include/asm/Kbuild
arch/m68k/include/asm/Kbuild
arch/metag/include/asm/Kbuild
arch/microblaze/include/asm/Kbuild
arch/mips/include/asm/Kbuild
arch/mn10300/include/asm/Kbuild
arch/openrisc/include/asm/Kbuild
arch/parisc/include/asm/Kbuild
arch/powerpc/include/asm/Kbuild
arch/s390/include/asm/Kbuild
arch/score/include/asm/Kbuild
arch/sh/include/asm/Kbuild
arch/sparc/include/asm/Kbuild
arch/tile/include/asm/Kbuild
arch/um/include/asm/Kbuild
arch/unicore32/include/asm/Kbuild
arch/x86/include/asm/Kbuild
arch/xtensa/include/asm/Kbuild
include/asm-generic/preempt.h
1 | +#ifndef __ASM_PREEMPT_H | |
2 | +#define __ASM_PREEMPT_H | |
3 | + | |
4 | +#include <linux/thread_info.h> | |
5 | + | |
6 | +/* | |
7 | + * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users | |
8 | + * that think a non-zero value indicates we cannot preempt. | |
9 | + */ | |
10 | +static __always_inline int preempt_count(void) | |
11 | +{ | |
12 | + return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; | |
13 | +} | |
14 | + | |
15 | +static __always_inline int *preempt_count_ptr(void) | |
16 | +{ | |
17 | + return &current_thread_info()->preempt_count; | |
18 | +} | |
19 | + | |
20 | +/* | |
21 | + * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the | |
22 | + * alternative is losing a reschedule. Better schedule too often -- also this | |
23 | + * should be a very rare operation. | |
24 | + */ | |
25 | +static __always_inline void preempt_count_set(int pc) | |
26 | +{ | |
27 | + *preempt_count_ptr() = pc; | |
28 | +} | |
29 | + | |
30 | +/* | |
31 | + * We fold the NEED_RESCHED bit into the preempt count such that | |
32 | + * preempt_enable() can decrement and test for needing to reschedule with a | |
33 | + * single instruction. | |
34 | + * | |
35 | + * We invert the actual bit, so that when the decrement hits 0 we know we both | |
36 | + * need to resched (the bit is cleared) and can resched (no preempt count). | |
37 | + */ | |
38 | + | |
39 | +static __always_inline void set_preempt_need_resched(void) | |
40 | +{ | |
41 | + *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED; | |
42 | +} | |
43 | + | |
44 | +static __always_inline void clear_preempt_need_resched(void) | |
45 | +{ | |
46 | + *preempt_count_ptr() |= PREEMPT_NEED_RESCHED; | |
47 | +} | |
48 | + | |
49 | +static __always_inline bool test_preempt_need_resched(void) | |
50 | +{ | |
51 | + return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); | |
52 | +} | |
53 | + | |
54 | +#endif /* __ASM_PREEMPT_H */ |
include/linux/preempt.h
... | ... | @@ -6,7 +6,6 @@ |
6 | 6 | * preempt_count (used for kernel preemption, interrupt count, etc.) |
7 | 7 | */ |
8 | 8 | |
9 | -#include <linux/thread_info.h> | |
10 | 9 | #include <linux/linkage.h> |
11 | 10 | #include <linux/list.h> |
12 | 11 | |
... | ... | @@ -16,53 +15,7 @@ |
16 | 15 | */ |
17 | 16 | #define PREEMPT_NEED_RESCHED 0x80000000 |
18 | 17 | |
19 | -/* | |
20 | - * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users | |
21 | - * that think a non-zero value indicates we cannot preempt. | |
22 | - */ | |
23 | -static __always_inline int preempt_count(void) | |
24 | -{ | |
25 | - return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; | |
26 | -} | |
27 | - | |
28 | -static __always_inline int *preempt_count_ptr(void) | |
29 | -{ | |
30 | - return &current_thread_info()->preempt_count; | |
31 | -} | |
32 | - | |
33 | -/* | |
34 | - * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the | |
35 | - * alternative is loosing a reschedule. Better schedule too often -- also this | |
36 | - * should be a very rare operation. | |
37 | - */ | |
38 | -static __always_inline void preempt_count_set(int pc) | |
39 | -{ | |
40 | - *preempt_count_ptr() = pc; | |
41 | -} | |
42 | - | |
43 | -/* | |
44 | - * We fold the NEED_RESCHED bit into the preempt count such that | |
45 | - * preempt_enable() can decrement and test for needing to reschedule with a | |
46 | - * single instruction. | |
47 | - * | |
48 | - * We invert the actual bit, so that when the decrement hits 0 we know we both | |
49 | - * need to resched (the bit is cleared) and can resched (no preempt count). | |
50 | - */ | |
51 | - | |
52 | -static __always_inline void set_preempt_need_resched(void) | |
53 | -{ | |
54 | - *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED; | |
55 | -} | |
56 | - | |
57 | -static __always_inline void clear_preempt_need_resched(void) | |
58 | -{ | |
59 | - *preempt_count_ptr() |= PREEMPT_NEED_RESCHED; | |
60 | -} | |
61 | - | |
62 | -static __always_inline bool test_preempt_need_resched(void) | |
63 | -{ | |
64 | - return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); | |
65 | -} | |
18 | +#include <asm/preempt.h> | |
66 | 19 | |
67 | 20 | #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) |
68 | 21 | extern void add_preempt_count(int val); |