Commit 1b12bbc747560ea68bcc132c3d05699e52271da0

Authored by Peter Zijlstra
Committed by Ingo Molnar
1 parent 64aa348edc

lockdep: re-annotate scheduler runqueues

Instead of using a per-rq lock class, use the regular nesting operations.

However, take extra care with double_lock_balance() as it can release the
already held rq->lock (and therefore change its nesting class).

So what can happen is:

 spin_lock(rq->lock);	// this rq subclass 0

 double_lock_balance(rq, other_rq);
   // release rq
   // acquire other_rq->lock subclass 0
   // acquire rq->lock subclass 1

 spin_unlock(other_rq->lock);

leaving you with rq->lock in subclass 1.

So a subsequent double_lock_balance() call can try to nest a subclass 1
lock while already holding a subclass 1 lock.
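For reference, the nesting dance inside double_lock_balance() looks roughly
like this (a sketch of the existing function, not part of this diff; details
may differ from the actual source):

 static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(this_rq->lock)
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
 {
 	int ret = 0;

 	if (unlikely(!spin_trylock(&busiest->lock))) {
 		if (busiest < this_rq) {
 			/* wrong lock order: drop ours, take both in order */
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
 			/* re-taken nested: this_rq->lock is now subclass 1 */
 			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
 			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }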

Fix this by introducing double_unlock_balance(), which releases the other
rq's lock but also resets the subclass of this rq's lock to 0.
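At the call sites (see the diff below), the intended pairing then looks
roughly like:

 spin_lock(&this_rq->lock);               /* this_rq->lock, subclass 0 */
 double_lock_balance(this_rq, busiest);   /* may re-take it as subclass 1 */
 /* ... move tasks between the runqueues ... */
 double_unlock_balance(this_rq, busiest); /* drops busiest->lock, resets
                                             this_rq->lock to subclass 0 */
 spin_unlock(&this_rq->lock);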

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 2 changed files with 14 additions and 5 deletions

kernel/sched.c
@@ -2812,6 +2812,13 @@
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3636,7 +3643,7 @@
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3751,7 +3758,7 @@
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ

kernel/sched_rt.c
@@ -861,6 +861,8 @@
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@
 			break;
 
 		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
 		lowest_rq = NULL;
 	}
 
@@ -1091,7 +1093,7 @@
 
 	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
 	ret = 1;
 out:
@@ -1197,7 +1199,7 @@
 
 		}
 skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}
 
 	return ret;