Commit bdd4e85dc36cdbcfc1608a5b2a17c80a9db8986a

Authored by Frederic Weisbecker
1 parent 2da8c8bc44

sched: Isolate preempt counting in its own config option

Create a new CONFIG_PREEMPT_COUNT that handles the inc/dec
of the preempt count offset independently, so that the offset
can be updated by preempt_disable() and preempt_enable()
even without the need for CONFIG_PREEMPT being set.

This prepares for making CONFIG_DEBUG_SPINLOCK_SLEEP work
with !CONFIG_PREEMPT, where it currently doesn't detect
code that sleeps inside explicit preemption-disabled
sections.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>

Showing 8 changed files with 33 additions and 22 deletions Side-by-side Diff

include/linux/bit_spinlock.h
... ... @@ -88,7 +88,7 @@
88 88 {
89 89 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
90 90 return test_bit(bitnum, addr);
91   -#elif defined CONFIG_PREEMPT
  91 +#elif defined CONFIG_PREEMPT_COUNT
92 92 return preempt_count();
93 93 #else
94 94 return 1;
include/linux/hardirq.h
... ... @@ -93,7 +93,7 @@
93 93 */
94 94 #define in_nmi() (preempt_count() & NMI_MASK)
95 95  
96   -#if defined(CONFIG_PREEMPT)
  96 +#if defined(CONFIG_PREEMPT_COUNT)
97 97 # define PREEMPT_CHECK_OFFSET 1
98 98 #else
99 99 # define PREEMPT_CHECK_OFFSET 0
... ... @@ -115,7 +115,7 @@
115 115 #define in_atomic_preempt_off() \
116 116 ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
117 117  
118   -#ifdef CONFIG_PREEMPT
  118 +#ifdef CONFIG_PREEMPT_COUNT
119 119 # define preemptible() (preempt_count() == 0 && !irqs_disabled())
120 120 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
121 121 #else
include/linux/pagemap.h
... ... @@ -134,7 +134,7 @@
134 134 VM_BUG_ON(in_interrupt());
135 135  
136 136 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
137   -# ifdef CONFIG_PREEMPT
  137 +# ifdef CONFIG_PREEMPT_COUNT
138 138 VM_BUG_ON(!in_atomic());
139 139 # endif
140 140 /*
... ... @@ -172,7 +172,7 @@
172 172 VM_BUG_ON(in_interrupt());
173 173  
174 174 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
175   -# ifdef CONFIG_PREEMPT
  175 +# ifdef CONFIG_PREEMPT_COUNT
176 176 VM_BUG_ON(!in_atomic());
177 177 # endif
178 178 VM_BUG_ON(page_count(page) == 0);
include/linux/preempt.h
... ... @@ -27,6 +27,21 @@
27 27  
28 28 asmlinkage void preempt_schedule(void);
29 29  
  30 +#define preempt_check_resched() \
  31 +do { \
  32 + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
  33 + preempt_schedule(); \
  34 +} while (0)
  35 +
  36 +#else /* !CONFIG_PREEMPT */
  37 +
  38 +#define preempt_check_resched() do { } while (0)
  39 +
  40 +#endif /* CONFIG_PREEMPT */
  41 +
  42 +
  43 +#ifdef CONFIG_PREEMPT_COUNT
  44 +
30 45 #define preempt_disable() \
31 46 do { \
32 47 inc_preempt_count(); \
... ... @@ -39,12 +54,6 @@
39 54 dec_preempt_count(); \
40 55 } while (0)
41 56  
42   -#define preempt_check_resched() \
43   -do { \
44   - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
45   - preempt_schedule(); \
46   -} while (0)
47   -
48 57 #define preempt_enable() \
49 58 do { \
50 59 preempt_enable_no_resched(); \
51 60  
52 61  
... ... @@ -80,18 +89,17 @@
80 89 preempt_check_resched(); \
81 90 } while (0)
82 91  
83   -#else
  92 +#else /* !CONFIG_PREEMPT_COUNT */
84 93  
85 94 #define preempt_disable() do { } while (0)
86 95 #define preempt_enable_no_resched() do { } while (0)
87 96 #define preempt_enable() do { } while (0)
88   -#define preempt_check_resched() do { } while (0)
89 97  
90 98 #define preempt_disable_notrace() do { } while (0)
91 99 #define preempt_enable_no_resched_notrace() do { } while (0)
92 100 #define preempt_enable_notrace() do { } while (0)
93 101  
94   -#endif
  102 +#endif /* CONFIG_PREEMPT_COUNT */
95 103  
96 104 #ifdef CONFIG_PREEMPT_NOTIFIERS
97 105  
include/linux/rcupdate.h
... ... @@ -239,7 +239,7 @@
239 239 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
240 240 * and while lockdep is disabled.
241 241 */
242   -#ifdef CONFIG_PREEMPT
  242 +#ifdef CONFIG_PREEMPT_COUNT
243 243 static inline int rcu_read_lock_sched_held(void)
244 244 {
245 245 int lockdep_opinion = 0;
246 246  
... ... @@ -250,12 +250,12 @@
250 250 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
251 251 return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
252 252 }
253   -#else /* #ifdef CONFIG_PREEMPT */
  253 +#else /* #ifdef CONFIG_PREEMPT_COUNT */
254 254 static inline int rcu_read_lock_sched_held(void)
255 255 {
256 256 return 1;
257 257 }
258   -#endif /* #else #ifdef CONFIG_PREEMPT */
  258 +#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
259 259  
260 260 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
261 261  
262 262  
263 263  
... ... @@ -276,17 +276,17 @@
276 276 return 1;
277 277 }
278 278  
279   -#ifdef CONFIG_PREEMPT
  279 +#ifdef CONFIG_PREEMPT_COUNT
280 280 static inline int rcu_read_lock_sched_held(void)
281 281 {
282 282 return preempt_count() != 0 || irqs_disabled();
283 283 }
284   -#else /* #ifdef CONFIG_PREEMPT */
  284 +#else /* #ifdef CONFIG_PREEMPT_COUNT */
285 285 static inline int rcu_read_lock_sched_held(void)
286 286 {
287 287 return 1;
288 288 }
289   -#endif /* #else #ifdef CONFIG_PREEMPT */
  289 +#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
290 290  
291 291 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
292 292  
include/linux/sched.h
... ... @@ -2502,7 +2502,7 @@
2502 2502  
2503 2503 extern int __cond_resched_lock(spinlock_t *lock);
2504 2504  
2505   -#ifdef CONFIG_PREEMPT
  2505 +#ifdef CONFIG_PREEMPT_COUNT
2506 2506 #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
2507 2507 #else
2508 2508 #define PREEMPT_LOCK_OFFSET 0
kernel/Kconfig.preempt
... ... @@ -35,6 +35,7 @@
35 35  
36 36 config PREEMPT
37 37 bool "Preemptible Kernel (Low-Latency Desktop)"
  38 + select PREEMPT_COUNT
38 39 help
39 40 This option reduces the latency of the kernel by making
40 41 all kernel code (that is not executing in a critical section)
... ... @@ -51,4 +52,7 @@
51 52 range.
52 53  
53 54 endchoice
  55 +
  56 +config PREEMPT_COUNT
  57 + bool
... ... @@ -2843,7 +2843,7 @@
2843 2843 #if defined(CONFIG_SMP)
2844 2844 p->on_cpu = 0;
2845 2845 #endif
2846   -#ifdef CONFIG_PREEMPT
  2846 +#ifdef CONFIG_PREEMPT_COUNT
2847 2847 /* Want to start with kernel preemption disabled. */
2848 2848 task_thread_info(p)->preempt_count = 1;
2849 2849 #endif