Commit d54d14bfb49f0b61aed9f20cb84cb692566cf83b

Authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Four misc fixes: each was deemed serious enough to warrant v3.15
  inclusion"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix tg_set_cfs_bandwidth() deadlock on rq->lock
  sched/dl: Fix race in dl_task_timer()
  sched: Fix sched_policy < 0 comparison
  sched/numa: Fix use of spin_{un}lock_irq() when interrupts are disabled

4 changed files

kernel/sched/core.c
@@ -3685,7 +3685,7 @@
 	if (retval)
 		return retval;

-	if (attr.sched_policy < 0)
+	if ((int)attr.sched_policy < 0)
 		return -EINVAL;

 	rcu_read_lock();
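The cast matters because sched_policy is an unsigned 32-bit field of struct sched_attr, so the old check attr.sched_policy < 0 could never be true and a negative policy value passed from userspace slipped past validation. Below is a minimal userspace sketch of the pitfall, plain C rather than kernel code, with an illustrative struct that only mimics the relevant field:

/* Why comparing an unsigned field against zero with '<' is a no-op. */
#include <stdint.h>
#include <stdio.h>

struct attr {
	uint32_t sched_policy;	/* unsigned, like the real sched_attr field */
};

int main(void)
{
	struct attr a = { .sched_policy = (uint32_t)-1 };	/* caller handed in -1 */

	/* Always false: an unsigned value can never be below zero. */
	if (a.sched_policy < 0)
		puts("rejected (never reached)");

	/* Casting to int restores the intended range check. */
	if ((int)a.sched_policy < 0)
		puts("rejected: negative policy");

	return 0;
}

Compilers usually flag the always-false comparison as a warning, which is how this class of bug tends to be caught.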
@@ -7751,8 +7751,7 @@
 	/* restart the period timer (if active) to handle new period expiry */
 	if (runtime_enabled && cfs_b->timer_active) {
 		/* force a reprogram */
-		cfs_b->timer_active = 0;
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, true);
 	}
 	raw_spin_unlock_irq(&cfs_b->lock);

kernel/sched/deadline.c
@@ -513,8 +513,16 @@
 						     struct sched_dl_entity,
 						     dl_timer);
 	struct task_struct *p = dl_task_of(dl_se);
-	struct rq *rq = task_rq(p);
+	struct rq *rq;
+again:
+	rq = task_rq(p);
 	raw_spin_lock(&rq->lock);
+
+	if (rq != task_rq(p)) {
+		/* Task was moved, retrying. */
+		raw_spin_unlock(&rq->lock);
+		goto again;
+	}

 	/*
 	 * We need to take care of a possible races here. In fact, the
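The deadline timer callback runs asynchronously to the scheduler, so between reading task_rq(p) and taking rq->lock the task may have been migrated to another runqueue, and the callback would then serialize against the wrong queue. The fix re-reads task_rq(p) under the lock and retries until both reads agree, essentially the same recheck idiom task_rq_lock() uses. A minimal userspace sketch of that lock-then-recheck pattern, using pthread mutexes and made-up struct names rather than the kernel's types:

#include <pthread.h>
#include <stdio.h>

struct rq {
	pthread_mutex_t lock;
};

struct task {
	/* Updated by a concurrent "migration", which is assumed to hold the
	 * old runqueue's lock before publishing the new pointer. */
	struct rq *rq;
};

/* Lock the runqueue the task currently belongs to, retrying if it moved
 * between the read and the lock acquisition. */
static struct rq *lock_task_rq(struct task *t)
{
	struct rq *rq;

again:
	rq = __atomic_load_n(&t->rq, __ATOMIC_ACQUIRE);
	pthread_mutex_lock(&rq->lock);
	if (rq != __atomic_load_n(&t->rq, __ATOMIC_ACQUIRE)) {
		/* Task was moved, retry against its new runqueue. */
		pthread_mutex_unlock(&rq->lock);
		goto again;
	}
	return rq;	/* stable while rq->lock is held */
}

int main(void)
{
	struct rq rq0 = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct task t = { .rq = &rq0 };
	struct rq *rq = lock_task_rq(&t);

	printf("locked the task's current runqueue: %p\n", (void *)rq);
	pthread_mutex_unlock(&rq->lock);
	return 0;
}

The recheck is only sound under the stated assumption that whoever moves the task takes the old runqueue's lock first; once the lock is held and the pointer still matches, it cannot change until the lock is dropped.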
kernel/sched/fair.c
@@ -1707,18 +1707,19 @@
 void task_numa_free(struct task_struct *p)
 {
 	struct numa_group *grp = p->numa_group;
-	int i;
 	void *numa_faults = p->numa_faults_memory;
+	unsigned long flags;
+	int i;

 	if (grp) {
-		spin_lock_irq(&grp->lock);
+		spin_lock_irqsave(&grp->lock, flags);
 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 			grp->faults[i] -= p->numa_faults_memory[i];
 		grp->total_faults -= p->total_numa_faults;

 		list_del(&p->numa_entry);
 		grp->nr_tasks--;
-		spin_unlock_irq(&grp->lock);
+		spin_unlock_irqrestore(&grp->lock, flags);
 		rcu_assign_pointer(p->numa_group, NULL);
 		put_numa_group(grp);
 	}
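spin_unlock_irq() unconditionally re-enables local interrupts, so the _irq variants are only correct when the caller is known to run with interrupts enabled. task_numa_free() can be reached on paths where interrupts are already disabled, and the old code would have switched them back on behind the caller's back; the irqsave/irqrestore pair records the previous interrupt state in flags and restores exactly that state. A toy userspace model of the difference follows; a plain boolean stands in for the CPU interrupt flag and the spinlock itself is omitted so the state handling stays visible (none of this is kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;	/* stand-in for the CPU interrupt flag */

static void local_irq_disable(void) { irqs_enabled = false; }
static void local_irq_enable(void)  { irqs_enabled = true; }

static void local_irq_save(bool *flags)
{
	*flags = irqs_enabled;
	irqs_enabled = false;
}

static void local_irq_restore(bool flags) { irqs_enabled = flags; }

/* Models a spin_lock_irq()/spin_unlock_irq() critical section. */
static void critical_section_irq(void)
{
	local_irq_disable();
	/* ... touch shared data ... */
	local_irq_enable();	/* unconditionally re-enables interrupts */
}

/* Models spin_lock_irqsave()/spin_unlock_irqrestore(). */
static void critical_section_irqsave(void)
{
	bool flags;

	local_irq_save(&flags);
	/* ... touch shared data ... */
	local_irq_restore(flags);	/* puts the flag back the way it was */
}

int main(void)
{
	local_irq_disable();		/* caller already has interrupts off */

	critical_section_irq();
	printf("after _irq variant:     interrupts %s (wrong)\n",
	       irqs_enabled ? "enabled" : "disabled");

	irqs_enabled = false;		/* reset the scenario */
	critical_section_irqsave();
	printf("after _irqsave variant: interrupts %s (correct)\n",
	       irqs_enabled ? "enabled" : "disabled");
	return 0;
}

The _irq variant leaves interrupts enabled even though the caller had them off, while the _irqsave variant leaves them exactly as it found them.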
@@ -3129,7 +3130,7 @@
 	 */
 	if (!cfs_b->timer_active) {
 		__refill_cfs_bandwidth_runtime(cfs_b);
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, false);
 	}

 	if (cfs_b->runtime > 0) {
@@ -3308,7 +3309,7 @@
 	raw_spin_lock(&cfs_b->lock);
 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 	if (!cfs_b->timer_active)
-		__start_cfs_bandwidth(cfs_b);
+		__start_cfs_bandwidth(cfs_b, false);
 	raw_spin_unlock(&cfs_b->lock);
 }

@@ -3690,7 +3691,7 @@
 }

 /* requires cfs_b->lock, may release to reprogram timer */
-void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 {
 	/*
 	 * The timer may be active because we're trying to set a new bandwidth
@@ -3705,7 +3706,7 @@
 		cpu_relax();
 		raw_spin_lock(&cfs_b->lock);
 		/* if someone else restarted the timer then we're done */
-		if (cfs_b->timer_active)
+		if (!force && cfs_b->timer_active)
 			return;
 	}

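The new force parameter replaces tg_set_cfs_bandwidth()'s old trick of clearing cfs_b->timer_active before calling __start_cfs_bandwidth(). Clearing the flag opened a window in which throttling paths that already hold rq->lock could also see the timer as inactive and enter __start_cfs_bandwidth(), where they would spin waiting for the running period-timer callback, which in turn needs rq->lock, hence the reported deadlock. With force, the flag stays set and only the bandwidth-update path skips the "someone else restarted the timer" early return. A minimal sketch of that guard, with made-up names rather than the kernel API:

#include <stdbool.h>
#include <stdio.h>

struct bandwidth {
	bool timer_active;
};

/* Restart the period timer; bail out early only when not forced and another
 * path has already restarted it. */
static void start_bandwidth(struct bandwidth *b, bool force)
{
	if (!force && b->timer_active) {
		puts("timer already active, nothing to do");
		return;
	}
	b->timer_active = true;
	puts(force ? "forced reprogram of the timer" : "timer (re)started");
}

int main(void)
{
	struct bandwidth b = { .timer_active = true };

	start_bandwidth(&b, false);	/* skipped: already running */
	start_bandwidth(&b, true);	/* reprogrammed without clearing the flag first */
	return 0;
}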
kernel/sched/sched.h
@@ -278,7 +278,7 @@
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
-extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

 extern void free_rt_sched_group(struct task_group *tg);