Commit 41719b03091911028116155deddc5eedf8c45e37

Authored by Peter Zijlstra
Committed by Ingo Molnar
1 parent 93d81d1aca

mutex: preemption fixes

The problem is that dropping the spinlock right before calling schedule() is a voluntary
preemption point (the unlock re-enables preemption) and can cause a schedule, right after which we schedule again.

Fix this inefficiency by keeping preemption disabled until we schedule; do this
by explicitly disabling preemption and providing a schedule() variant that
assumes preemption is already disabled.
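
Concretely, before this change the sleep step in the mutex slowpath looked roughly like the sketch below (simplified, not the verbatim kernel code). With CONFIG_PREEMPT, spin_unlock_mutex() re-enables preemption and may itself call into the scheduler, only for the explicit schedule() on the next line to switch contexts a second time:

	/* __mutex_lock_common() sleep step before the fix (sketch) */
	__set_task_state(task, state);
	spin_unlock_mutex(&lock->wait_lock, flags);	/* preemption point: may schedule here... */
	schedule();					/* ...and then we deliberately schedule again */
	spin_lock_mutex(&lock->wait_lock, flags);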

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 3 changed files with 12 additions and 4 deletions

include/linux/sched.h
... ... @@ -328,6 +328,7 @@
328 328 extern signed long schedule_timeout_interruptible(signed long timeout);
329 329 extern signed long schedule_timeout_killable(signed long timeout);
330 330 extern signed long schedule_timeout_uninterruptible(signed long timeout);
  331 +asmlinkage void __schedule(void);
331 332 asmlinkage void schedule(void);
332 333  
333 334 struct nsproxy;

kernel/mutex.c
... ... @@ -131,6 +131,7 @@
131 131 struct mutex_waiter waiter;
132 132 unsigned long flags;
133 133  
  134 + preempt_disable();
134 135 spin_lock_mutex(&lock->wait_lock, flags);
135 136  
136 137 debug_mutex_lock_common(lock, &waiter);
137 138  
... ... @@ -170,13 +171,14 @@
170 171 spin_unlock_mutex(&lock->wait_lock, flags);
171 172  
172 173 debug_mutex_free_waiter(&waiter);
  174 + preempt_enable();
173 175 return -EINTR;
174 176 }
175 177 __set_task_state(task, state);
176 178  
177 179 /* didnt get the lock, go to sleep: */
178 180 spin_unlock_mutex(&lock->wait_lock, flags);
179   - schedule();
  181 + __schedule();
180 182 spin_lock_mutex(&lock->wait_lock, flags);
181 183 }
182 184  
... ... @@ -193,6 +195,7 @@
193 195 spin_unlock_mutex(&lock->wait_lock, flags);
194 196  
195 197 debug_mutex_free_waiter(&waiter);
  198 + preempt_enable();
196 199  
197 200 return 0;
198 201 }
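
Taken together, the kernel/mutex.c hunks bracket the whole slowpath with an explicit preempt_disable()/preempt_enable() pair (on the signal-exit path as well as the success path) and call the new __schedule() variant while preemption is already off. A minimal sketch of the resulting structure, with the acquisition and signal checks elided:

	preempt_disable();
	spin_lock_mutex(&lock->wait_lock, flags);
	for (;;) {
		/* ... try to take the lock; on a fatal signal:
		 *     spin_unlock_mutex(), preempt_enable(), return -EINTR ... */
		__set_task_state(task, state);
		spin_unlock_mutex(&lock->wait_lock, flags);
		__schedule();		/* assumes preemption is already disabled */
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	/* got the lock */
	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();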

kernel/sched.c
... ... @@ -4538,15 +4538,13 @@
4538 4538 /*
4539 4539 * schedule() is the main scheduler function.
4540 4540 */
4541   -asmlinkage void __sched schedule(void)
  4541 +asmlinkage void __sched __schedule(void)
4542 4542 {
4543 4543 struct task_struct *prev, *next;
4544 4544 unsigned long *switch_count;
4545 4545 struct rq *rq;
4546 4546 int cpu;
4547 4547  
4548   -need_resched:
4549   - preempt_disable();
4550 4548 cpu = smp_processor_id();
4551 4549 rq = cpu_rq(cpu);
4552 4550 rcu_qsctr_inc(cpu);
4553 4551  
... ... @@ -4603,7 +4601,13 @@
4603 4601  
4604 4602 if (unlikely(reacquire_kernel_lock(current) < 0))
4605 4603 goto need_resched_nonpreemptible;
  4604 +}
4606 4605  
  4606 +asmlinkage void __sched schedule(void)
  4607 +{
  4608 +need_resched:
  4609 + preempt_disable();
  4610 + __schedule();
4607 4611 preempt_enable_no_resched();
4608 4612 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
4609 4613 goto need_resched;
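
Assembled from the kernel/sched.c hunks above, schedule() is now a thin wrapper that disables preemption, runs the core __schedule(), and loops if a reschedule became pending in the meantime (the closing brace is not shown in the hunk above):

asmlinkage void __sched schedule(void)
{
need_resched:
	preempt_disable();
	__schedule();
	preempt_enable_no_resched();
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
		goto need_resched;
}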