Commit c8dad2bb6307f5b00f804a686917105206a4d5c9

Authored by Jan Blunck
Committed by Linus Torvalds
1 parent f817ed4853

memcg: reduce size of mem_cgroup by using nr_cpu_ids

As Jan Blunck <jblunck@suse.de> pointed out, sizing memcg's per-cpu stat
array by NR_CPUS wastes memory on systems with fewer possible CPUs.

This patch changes mem_cgroup's cpustat allocation to be sized by
nr_cpu_ids (the actual number of possible CPU IDs) instead of NR_CPUS.

Reviewed-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 18 additions and 17 deletions Side-by-side Diff

... ... @@ -60,7 +60,7 @@
60 60 } ____cacheline_aligned_in_smp;
61 61  
62 62 struct mem_cgroup_stat {
63   - struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
  63 + struct mem_cgroup_stat_cpu cpustat[0];
64 64 };
65 65  
66 66 /*
67 67  
... ... @@ -129,11 +129,10 @@
129 129  
130 130 int prev_priority; /* for recording reclaim priority */
131 131 /*
132   - * statistics.
  132 + * statistics. This must be placed at the end of memcg.
133 133 */
134 134 struct mem_cgroup_stat stat;
135 135 };
136   -static struct mem_cgroup init_mem_cgroup;
137 136  
138 137 enum charge_type {
139 138 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
140 139  
141 140  
142 141  
143 142  
144 143  
... ... @@ -1293,23 +1292,30 @@
1293 1292 kfree(mem->info.nodeinfo[node]);
1294 1293 }
1295 1294  
  1295 +static int mem_cgroup_size(void)
  1296 +{
  1297 + int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
  1298 + return sizeof(struct mem_cgroup) + cpustat_size;
  1299 +}
  1300 +
1296 1301 static struct mem_cgroup *mem_cgroup_alloc(void)
1297 1302 {
1298 1303 struct mem_cgroup *mem;
  1304 + int size = mem_cgroup_size();
1299 1305  
1300   - if (sizeof(*mem) < PAGE_SIZE)
1301   - mem = kmalloc(sizeof(*mem), GFP_KERNEL);
  1306 + if (size < PAGE_SIZE)
  1307 + mem = kmalloc(size, GFP_KERNEL);
1302 1308 else
1303   - mem = vmalloc(sizeof(*mem));
  1309 + mem = vmalloc(size);
1304 1310  
1305 1311 if (mem)
1306   - memset(mem, 0, sizeof(*mem));
  1312 + memset(mem, 0, size);
1307 1313 return mem;
1308 1314 }
1309 1315  
1310 1316 static void mem_cgroup_free(struct mem_cgroup *mem)
1311 1317 {
1312   - if (sizeof(*mem) < PAGE_SIZE)
  1318 + if (mem_cgroup_size() < PAGE_SIZE)
1313 1319 kfree(mem);
1314 1320 else
1315 1321 vfree(mem);
... ... @@ -1322,13 +1328,9 @@
1322 1328 struct mem_cgroup *mem;
1323 1329 int node;
1324 1330  
1325   - if (unlikely((cont->parent) == NULL)) {
1326   - mem = &init_mem_cgroup;
1327   - } else {
1328   - mem = mem_cgroup_alloc();
1329   - if (!mem)
1330   - return ERR_PTR(-ENOMEM);
1331   - }
  1331 + mem = mem_cgroup_alloc();
  1332 + if (!mem)
  1333 + return ERR_PTR(-ENOMEM);
1332 1334  
1333 1335 res_counter_init(&mem->res);
1334 1336  
... ... @@ -1340,8 +1342,7 @@
1340 1342 free_out:
1341 1343 for_each_node_state(node, N_POSSIBLE)
1342 1344 free_mem_cgroup_per_zone_info(mem, node);
1343   - if (cont->parent != NULL)
1344   - mem_cgroup_free(mem);
  1345 + mem_cgroup_free(mem);
1345 1346 return ERR_PTR(-ENOMEM);
1346 1347 }
1347 1348