Commit ee24d3797780eee6ffe581a7b78d27896f9b494a

Authored by Li Zefan
Committed by Linus Torvalds
1 parent c8163ca8af

cpuset: fix unchecked calls to NODEMASK_ALLOC()

Functions that use NODEMASK_ALLOC() have no way to propagate an errno
to users, so they fail silently when the allocation fails.

Fix it by using a static nodemask_t variable in each function; these
variables are protected by cgroup_mutex.

[akpm@linux-foundation.org: fix comment spelling, strengthen cgroup_lock comment]
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 16 additions and 35 deletions Side-by-side Diff

... ... @@ -1015,18 +1015,13 @@
1015 1015 struct cpuset *cs;
1016 1016 int migrate;
1017 1017 const nodemask_t *oldmem = scan->data;
1018   - NODEMASK_ALLOC(nodemask_t, newmems, GFP_KERNEL);
  1018 + static nodemask_t newmems; /* protected by cgroup_mutex */
1019 1019  
1020   - if (!newmems)
1021   - return;
1022   -
1023 1020 cs = cgroup_cs(scan->cg);
1024   - guarantee_online_mems(cs, newmems);
  1021 + guarantee_online_mems(cs, &newmems);
1025 1022  
1026   - cpuset_change_task_nodemask(p, newmems);
  1023 + cpuset_change_task_nodemask(p, &newmems);
1027 1024  
1028   - NODEMASK_FREE(newmems);
1029   -
1030 1025 mm = get_task_mm(p);
1031 1026 if (!mm)
1032 1027 return;
1033 1028  
1034 1029  
1035 1030  
1036 1031  
1037 1032  
1038 1033  
1039 1034  
1040 1035  
... ... @@ -1438,41 +1433,35 @@
1438 1433 struct mm_struct *mm;
1439 1434 struct cpuset *cs = cgroup_cs(cont);
1440 1435 struct cpuset *oldcs = cgroup_cs(oldcont);
1441   - NODEMASK_ALLOC(nodemask_t, to, GFP_KERNEL);
  1436 + static nodemask_t to; /* protected by cgroup_mutex */
1442 1437  
1443   - if (to == NULL)
1444   - goto alloc_fail;
1445   -
1446 1438 if (cs == &top_cpuset) {
1447 1439 cpumask_copy(cpus_attach, cpu_possible_mask);
1448 1440 } else {
1449 1441 guarantee_online_cpus(cs, cpus_attach);
1450 1442 }
1451   - guarantee_online_mems(cs, to);
  1443 + guarantee_online_mems(cs, &to);
1452 1444  
1453 1445 /* do per-task migration stuff possibly for each in the threadgroup */
1454   - cpuset_attach_task(tsk, to, cs);
  1446 + cpuset_attach_task(tsk, &to, cs);
1455 1447 if (threadgroup) {
1456 1448 struct task_struct *c;
1457 1449 rcu_read_lock();
1458 1450 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
1459   - cpuset_attach_task(c, to, cs);
  1451 + cpuset_attach_task(c, &to, cs);
1460 1452 }
1461 1453 rcu_read_unlock();
1462 1454 }
1463 1455  
1464 1456 /* change mm; only needs to be done once even if threadgroup */
1465   - *to = cs->mems_allowed;
  1457 + to = cs->mems_allowed;
1466 1458 mm = get_task_mm(tsk);
1467 1459 if (mm) {
1468   - mpol_rebind_mm(mm, to);
  1460 + mpol_rebind_mm(mm, &to);
1469 1461 if (is_memory_migrate(cs))
1470   - cpuset_migrate_mm(mm, &oldcs->mems_allowed, to);
  1462 + cpuset_migrate_mm(mm, &oldcs->mems_allowed, &to);
1471 1463 mmput(mm);
1472 1464 }
1473   -
1474   -alloc_fail:
1475   - NODEMASK_FREE(to);
1476 1465 }
1477 1466  
1478 1467 /* The various types of files and directories in a cpuset file system */
1479 1468  
... ... @@ -2055,11 +2044,8 @@
2055 2044 struct cpuset *cp; /* scans cpusets being updated */
2056 2045 struct cpuset *child; /* scans child cpusets of cp */
2057 2046 struct cgroup *cont;
2058   - NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
  2047 + static nodemask_t oldmems; /* protected by cgroup_mutex */
2059 2048  
2060   - if (oldmems == NULL)
2061   - return;
2062   -
2063 2049 list_add_tail((struct list_head *)&root->stack_list, &queue);
2064 2050  
2065 2051 while (!list_empty(&queue)) {
... ... @@ -2075,7 +2061,7 @@
2075 2061 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
2076 2062 continue;
2077 2063  
2078   - *oldmems = cp->mems_allowed;
  2064 + oldmems = cp->mems_allowed;
2079 2065  
2080 2066 /* Remove offline cpus and mems from this cpuset. */
2081 2067 mutex_lock(&callback_mutex);
2082 2068  
... ... @@ -2091,10 +2077,9 @@
2091 2077 remove_tasks_in_empty_cpuset(cp);
2092 2078 else {
2093 2079 update_tasks_cpumask(cp, NULL);
2094   - update_tasks_nodemask(cp, oldmems, NULL);
  2080 + update_tasks_nodemask(cp, &oldmems, NULL);
2095 2081 }
2096 2082 }
2097   - NODEMASK_FREE(oldmems);
2098 2083 }
2099 2084  
2100 2085 /*
2101 2086  
2102 2087  
2103 2088  
... ... @@ -2136,19 +2121,16 @@
2136 2121 static int cpuset_track_online_nodes(struct notifier_block *self,
2137 2122 unsigned long action, void *arg)
2138 2123 {
2139   - NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
  2124 + static nodemask_t oldmems; /* protected by cgroup_mutex */
2140 2125  
2141   - if (oldmems == NULL)
2142   - return NOTIFY_DONE;
2143   -
2144 2126 cgroup_lock();
2145 2127 switch (action) {
2146 2128 case MEM_ONLINE:
2147   - *oldmems = top_cpuset.mems_allowed;
  2129 + oldmems = top_cpuset.mems_allowed;
2148 2130 mutex_lock(&callback_mutex);
2149 2131 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2150 2132 mutex_unlock(&callback_mutex);
2151   - update_tasks_nodemask(&top_cpuset, oldmems, NULL);
  2133 + update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
2152 2134 break;
2153 2135 case MEM_OFFLINE:
2154 2136 /*
... ... @@ -2162,7 +2144,6 @@
2162 2144 }
2163 2145 cgroup_unlock();
2164 2146  
2165   - NODEMASK_FREE(oldmems);
2166 2147 return NOTIFY_OK;
2167 2148 }
2168 2149 #endif