Commit 9985b0bab332289f14837eff3c6e0bcc658b58f7

Authored by David Rientjes
Committed by Ingo Molnar
1 parent 7def2be1dc

sched: prevent bound kthreads from changing cpus_allowed

Kthreads that have called kthread_bind() are bound to specific cpus, so
other tasks should not be able to change their cpus_allowed from under
them.  Otherwise, it is possible to move kthreads, such as the migration
or software watchdog threads, so they are not allowed access to the cpu
they work on.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Menage <menage@google.com>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 4 changed files with 21 additions and 1 deletion — Side-by-side Diff

include/linux/sched.h
... ... @@ -1486,6 +1486,7 @@
1486 1486 #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1487 1487 #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
1488 1488 #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
  1489 +#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
1489 1490 #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
1490 1491 #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1491 1492 #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
kernel/cpuset.c
... ... @@ -1190,7 +1190,16 @@
1190 1190  
1191 1191 if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1192 1192 return -ENOSPC;
  1193 + if (tsk->flags & PF_THREAD_BOUND) {
  1194 + cpumask_t mask;
1193 1195  
  1196 + mutex_lock(&callback_mutex);
  1197 + mask = cs->cpus_allowed;
  1198 + mutex_unlock(&callback_mutex);
  1199 + if (!cpus_equal(tsk->cpus_allowed, mask))
  1200 + return -EINVAL;
  1201 + }
  1202 +
1194 1203 return security_task_setscheduler(tsk, 0, NULL);
1195 1204 }
1196 1205  
1197 1206  
1198 1207  
... ... @@ -1203,11 +1212,14 @@
1203 1212 struct mm_struct *mm;
1204 1213 struct cpuset *cs = cgroup_cs(cont);
1205 1214 struct cpuset *oldcs = cgroup_cs(oldcont);
  1215 + int err;
1206 1216  
1207 1217 mutex_lock(&callback_mutex);
1208 1218 guarantee_online_cpus(cs, &cpus);
1209   - set_cpus_allowed_ptr(tsk, &cpus);
  1219 + err = set_cpus_allowed_ptr(tsk, &cpus);
1210 1220 mutex_unlock(&callback_mutex);
  1221 + if (err)
  1222 + return;
1211 1223  
1212 1224 from = oldcs->mems_allowed;
1213 1225 to = cs->mems_allowed;
kernel/kthread.c
... ... @@ -180,6 +180,7 @@
180 180 set_task_cpu(k, cpu);
181 181 k->cpus_allowed = cpumask_of_cpu(cpu);
182 182 k->rt.nr_cpus_allowed = 1;
  183 + k->flags |= PF_THREAD_BOUND;
183 184 }
184 185 EXPORT_SYMBOL(kthread_bind);
185 186  
kernel/sched.c
... ... @@ -5563,6 +5563,12 @@
5563 5563 goto out;
5564 5564 }
5565 5565  
  5566 + if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
  5567 + !cpus_equal(p->cpus_allowed, *new_mask))) {
  5568 + ret = -EINVAL;
  5569 + goto out;
  5570 + }
  5571 +
5566 5572 if (p->sched_class->set_cpus_allowed)
5567 5573 p->sched_class->set_cpus_allowed(p, new_mask);
5568 5574 else {