Commit ba74c1448f127649046615ec017bded7b2a76f29

Authored by Thomas Gleixner
Committed by Ingo Molnar
1 parent bd2f55361f

sched/rt: Document scheduler related skip-resched-check sites

Create a distinction between scheduler related preempt_enable_no_resched()
calls and the nearly one hundred other places in the kernel that do not
want to reschedule, for one reason or another.

This distinction matters for -rt, where the scheduler and the non-scheduler
preempt models (and checks) are different. For upstream it's purely
documentational.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-gs88fvx2mdv5psnzxnv575ke@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 5 changed files with 11 additions and 8 deletions Side-by-side Diff

arch/powerpc/kernel/idle.c
... ... @@ -102,7 +102,7 @@
102 102 rcu_idle_exit();
103 103 tick_nohz_idle_exit();
104 104 if (cpu_should_die()) {
105   - preempt_enable_no_resched();
  105 + sched_preempt_enable_no_resched();
106 106 cpu_die();
107 107 }
108 108 schedule_preempt_disabled();
arch/sparc/kernel/process_64.c
... ... @@ -106,7 +106,7 @@
106 106  
107 107 #ifdef CONFIG_HOTPLUG_CPU
108 108 if (cpu_is_offline(cpu)) {
109   - preempt_enable_no_resched();
  109 + sched_preempt_enable_no_resched();
110 110 cpu_play_dead();
111 111 }
112 112 #endif
include/linux/preempt.h
... ... @@ -48,12 +48,14 @@
48 48 barrier(); \
49 49 } while (0)
50 50  
51   -#define preempt_enable_no_resched() \
  51 +#define sched_preempt_enable_no_resched() \
52 52 do { \
53 53 barrier(); \
54 54 dec_preempt_count(); \
55 55 } while (0)
56 56  
  57 +#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
  58 +
57 59 #define preempt_enable() \
58 60 do { \
59 61 preempt_enable_no_resched(); \
... ... @@ -92,6 +94,7 @@
92 94 #else /* !CONFIG_PREEMPT_COUNT */
93 95  
94 96 #define preempt_disable() do { } while (0)
  97 +#define sched_preempt_enable_no_resched() do { } while (0)
95 98 #define preempt_enable_no_resched() do { } while (0)
96 99 #define preempt_enable() do { } while (0)
97 100  
kernel/sched/core.c
... ... @@ -3220,7 +3220,7 @@
3220 3220  
3221 3221 post_schedule(rq);
3222 3222  
3223   - preempt_enable_no_resched();
  3223 + sched_preempt_enable_no_resched();
3224 3224 if (need_resched())
3225 3225 goto need_resched;
3226 3226 }
... ... @@ -3253,7 +3253,7 @@
3253 3253 */
3254 3254 void __sched schedule_preempt_disabled(void)
3255 3255 {
3256   - preempt_enable_no_resched();
  3256 + sched_preempt_enable_no_resched();
3257 3257 schedule();
3258 3258 preempt_disable();
3259 3259 }
... ... @@ -4486,7 +4486,7 @@
4486 4486 __release(rq->lock);
4487 4487 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4488 4488 do_raw_spin_unlock(&rq->lock);
4489   - preempt_enable_no_resched();
  4489 + sched_preempt_enable_no_resched();
4490 4490  
4491 4491 schedule();
4492 4492  
kernel/softirq.c
... ... @@ -353,7 +353,7 @@
353 353 tick_nohz_irq_exit();
354 354 #endif
355 355 rcu_irq_exit();
356   - preempt_enable_no_resched();
  356 + sched_preempt_enable_no_resched();
357 357 }
358 358  
359 359 /*
... ... @@ -759,7 +759,7 @@
759 759 if (local_softirq_pending())
760 760 __do_softirq();
761 761 local_irq_enable();
762   - preempt_enable_no_resched();
  762 + sched_preempt_enable_no_resched();
763 763 cond_resched();
764 764 preempt_disable();
765 765 rcu_note_context_switch((long)__bind_cpu);