Commit 09dc4ab03936df5c5aa711d27c81283c6d09f495

Authored by Roman Gushchin
Committed by Ingo Molnar
1 parent 0f397f2c90

sched/fair: Fix tg_set_cfs_bandwidth() deadlock on rq->lock

tg_set_cfs_bandwidth() sets cfs_b->timer_active to 0 to
force a restart of the period timer. This is not safe, because
it can lead to a deadlock, described in commit 927b54fccbf0:
"__start_cfs_bandwidth calls hrtimer_cancel while holding rq->lock,
waiting for the hrtimer to finish. However, if sched_cfs_period_timer
runs for another loop iteration, the hrtimer can attempt to take
rq->lock, resulting in deadlock."

Three CPUs must be involved:

  CPU0               CPU1                         CPU2
  take rq->lock      period timer fired
  ...                take cfs_b lock
  ...                ...                          tg_set_cfs_bandwidth()
  throttle_cfs_rq()  release cfs_b lock           take cfs_b lock
  ...                distribute_cfs_runtime()     timer_active = 0
  take cfs_b->lock   wait for rq->lock            ...
  __start_cfs_bandwidth()
  {wait for timer callback
   break if timer_active == 1}

So, CPU0 and CPU1 are deadlocked: CPU0 cannot break out of its
wait loop because CPU2 has cleared cfs_b->timer_active, and the
timer callback on CPU1 cannot complete because CPU0 holds rq->lock.
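
The pre-patch busy-wait in __start_cfs_bandwidth() makes this
concrete (a sketch reconstructed from the hunks below, not the
verbatim source): the loop's only early exit tests
cfs_b->timer_active, which CPU2 has just cleared, while
hrtimer_try_to_cancel() keeps failing because the callback on
CPU1 is blocked on rq->lock:

  /* pre-patch; requires cfs_b->lock, may release to reprogram timer */
  void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
  {
  	while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
  	       hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
  		/* bounce the lock to allow do_sched_cfs_period_timer to run */
  		raw_spin_unlock(&cfs_b->lock);
  		cpu_relax();
  		raw_spin_lock(&cfs_b->lock);
  		/* never taken in the scenario above: CPU2 set timer_active = 0 */
  		if (cfs_b->timer_active)
  			return;
  	}

  	cfs_b->timer_active = 1;
  	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
  }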

Instead of resetting cfs_b->timer_active, tg_set_cfs_bandwidth()
can wait for the period timer callbacks to complete (ignoring
cfs_b->timer_active) and restart the timer explicitly.
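
After the change, __start_cfs_bandwidth() takes a 'force' flag;
when it is set, the loop keeps waiting for the in-flight callback
to complete even if cfs_b->timer_active is already 1. A sketch of
the post-patch function, assembled from the hunks below:

  /* post-patch; requires cfs_b->lock, may release to reprogram timer */
  void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
  {
  	while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
  	       hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
  		/* bounce the lock to allow do_sched_cfs_period_timer to run */
  		raw_spin_unlock(&cfs_b->lock);
  		cpu_relax();
  		raw_spin_lock(&cfs_b->lock);
  		/* if someone else restarted the timer then we're done */
  		if (!force && cfs_b->timer_active)
  			return;
  	}

  	cfs_b->timer_active = 1;
  	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
  }

tg_set_cfs_bandwidth() passes force = true while holding only
cfs_b->lock (not rq->lock), so the callback it waits for can
always make progress; the throttle paths pass false and retain
their early exit.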

Signed-off-by: Roman Gushchin <klamm@yandex-team.ru>
Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/87wqdi9g8e.wl\%klamm@yandex-team.ru
Cc: pjt@google.com
Cc: chris.j.arges@canonical.com
Cc: gregkh@linuxfoundation.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 3 changed files with 6 additions and 7 deletions

kernel/sched/core.c
@@ -7751,8 +7751,7 @@
 	/* restart the period timer (if active) to handle new period expiry */
 	if (runtime_enabled && cfs_b->timer_active) {
 		/* force a reprogram */
-		cfs_b->timer_active = 0;
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, true);
 	}
 	raw_spin_unlock_irq(&cfs_b->lock);
 
kernel/sched/fair.c
@@ -3130,7 +3130,7 @@
 	 */
 	if (!cfs_b->timer_active) {
 		__refill_cfs_bandwidth_runtime(cfs_b);
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, false);
 	}
 
 	if (cfs_b->runtime > 0) {
@@ -3309,7 +3309,7 @@
 	raw_spin_lock(&cfs_b->lock);
 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 	if (!cfs_b->timer_active)
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, false);
 	raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -3691,7 +3691,7 @@
 }
 
 /* requires cfs_b->lock, may release to reprogram timer */
-void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 {
 	/*
 	 * The timer may be active because we're trying to set a new bandwidth
@@ -3706,7 +3706,7 @@
 		cpu_relax();
 		raw_spin_lock(&cfs_b->lock);
 		/* if someone else restarted the timer then we're done */
-		if (cfs_b->timer_active)
+		if (!force && cfs_b->timer_active)
 			return;
 	}
 
kernel/sched/sched.h
@@ -278,7 +278,7 @@
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
-extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 
 extern void free_rt_sched_group(struct task_group *tg);