Commit 1e1b6c511d1b23cb7c3b619d82fc7bd9f620565d

Authored by KOSAKI Motohiro
Committed by Ingo Molnar
1 parent 1e87623178

cpuset: Fix cpuset_cpus_allowed_fallback(), don't update tsk->rt.nr_cpus_allowed

The rule is that we must update tsk->rt.nr_cpus_allowed whenever we
change tsk->cpus_allowed. Otherwise the RT scheduler may get confused.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/4DD4B3FA.5060901@jp.fujitsu.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
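
The rule stated in the changelog matters because the RT scheduler's push/pull migration logic relies on the cached weight in tsk->rt.nr_cpus_allowed (a task is only treated as pushable when the count is greater than 1). Below is a minimal userspace sketch of the invariant that the new do_set_cpus_allowed() helper enforces; the types and helpers are simplified toy stand-ins, not the kernel's own:

/* Sketch of the invariant: the affinity mask and its cached weight
 * must change together.  All types here are toy stand-ins. */
#include <stdio.h>

#define NR_CPUS 8

struct cpumask { unsigned long bits; };

struct task {
	struct cpumask cpus_allowed;
	int nr_cpus_allowed;	/* stands in for tsk->rt.nr_cpus_allowed */
};

static int cpumask_weight(const struct cpumask *m)
{
	return __builtin_popcountl(m->bits);
}

/* Mirrors the shape of the new helper: one place that keeps the
 * mask and its cached weight in sync. */
static void do_set_cpus_allowed(struct task *p, const struct cpumask *new_mask)
{
	p->cpus_allowed = *new_mask;
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

int main(void)
{
	struct task t = { { 0x1UL }, 1 };	/* pinned to CPU 0 */
	struct cpumask possible = { (1UL << NR_CPUS) - 1 };

	/* Buggy pattern this commit removes: copy the mask directly... */
	t.cpus_allowed = possible;
	/* ...leaving the cached count stale at 1, so RT push/pull logic
	 * would wrongly treat the task as unmigratable. */
	printf("stale:  weight=%d cached=%d\n",
	       cpumask_weight(&t.cpus_allowed), t.nr_cpus_allowed);

	/* Fixed pattern: update both together through the helper. */
	do_set_cpus_allowed(&t, &possible);
	printf("synced: weight=%d cached=%d\n",
	       cpumask_weight(&t.cpus_allowed), t.nr_cpus_allowed);
	return 0;
}

In the stale case cpumask_weight() reports 8 while the cached count still says 1; funneling every mask update through one helper, as this commit does, is what rules that mismatch out.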

Showing 5 changed files with 24 additions and 12 deletions

include/linux/cpuset.h
... ... @@ -146,7 +146,7 @@
146 146  
147 147 static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
148 148 {
149   - cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
  149 + do_set_cpus_allowed(p, cpu_possible_mask);
150 150 return cpumask_any(cpu_active_mask);
151 151 }
152 152  
include/linux/sched.h
... ... @@ -1841,9 +1841,16 @@
1841 1841 #endif
1842 1842  
1843 1843 #ifdef CONFIG_SMP
  1844 +extern void do_set_cpus_allowed(struct task_struct *p,
  1845 + const struct cpumask *new_mask);
  1846 +
1844 1847 extern int set_cpus_allowed_ptr(struct task_struct *p,
1845 1848 const struct cpumask *new_mask);
1846 1849 #else
  1850 +static inline void do_set_cpus_allowed(struct task_struct *p,
  1851 + const struct cpumask *new_mask)
  1852 +{
  1853 +}
1847 1854 static inline int set_cpus_allowed_ptr(struct task_struct *p,
1848 1855 const struct cpumask *new_mask)
1849 1856 {
kernel/cpuset.c
... ... @@ -2190,7 +2190,7 @@
2190 2190 rcu_read_lock();
2191 2191 cs = task_cs(tsk);
2192 2192 if (cs)
2193   - cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
  2193 + do_set_cpus_allowed(tsk, cs->cpus_allowed);
2194 2194 rcu_read_unlock();
2195 2195  
2196 2196 /*
... ... @@ -2217,7 +2217,7 @@
2217 2217 * Like above we can temporary set any mask and rely on
2218 2218 * set_cpus_allowed_ptr() as synchronization point.
2219 2219 */
2220   - cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
  2220 + do_set_cpus_allowed(tsk, cpu_possible_mask);
2221 2221 cpu = cpumask_any(cpu_active_mask);
2222 2222 }
2223 2223  
kernel/kthread.c
... ... @@ -202,8 +202,8 @@
202 202 return;
203 203 }
204 204  
205   - p->cpus_allowed = cpumask_of_cpu(cpu);
206   - p->rt.nr_cpus_allowed = 1;
  205 + /* It's safe because the task is inactive. */
  206 + do_set_cpus_allowed(p, cpumask_of(cpu));
207 207 p->flags |= PF_THREAD_BOUND;
208 208 }
209 209 EXPORT_SYMBOL(kthread_bind);
kernel/sched.c
... ... @@ -5860,7 +5860,7 @@
5860 5860 idle->state = TASK_RUNNING;
5861 5861 idle->se.exec_start = sched_clock();
5862 5862  
5863   - cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
  5863 + do_set_cpus_allowed(idle, cpumask_of(cpu));
5864 5864 /*
5865 5865 * We're having a chicken and egg problem, even though we are
5866 5866 * holding rq->lock, the cpu isn't yet set to this cpu so the
... ... @@ -5948,6 +5948,16 @@
5948 5948 }
5949 5949  
5950 5950 #ifdef CONFIG_SMP
  5951 +void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  5952 +{
  5953 + if (p->sched_class && p->sched_class->set_cpus_allowed)
  5954 + p->sched_class->set_cpus_allowed(p, new_mask);
  5955 + else {
  5956 + cpumask_copy(&p->cpus_allowed, new_mask);
  5957 + p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
  5958 + }
  5959 +}
  5960 +
5951 5961 /*
5952 5962 * This is how migration works:
5953 5963 *
... ... @@ -5993,12 +6003,7 @@
5993 6003 goto out;
5994 6004 }
5995 6005  
5996   - if (p->sched_class->set_cpus_allowed)
5997   - p->sched_class->set_cpus_allowed(p, new_mask);
5998   - else {
5999   - cpumask_copy(&p->cpus_allowed, new_mask);
6000   - p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
6001   - }
  6006 + do_set_cpus_allowed(p, new_mask);
6002 6007  
6003 6008 /* Can the task run on the task's current CPU? If so, we're done */
6004 6009 if (cpumask_test_cpu(task_cpu(p), new_mask))