Commit 8697d33194faae6fdd6b2e799f6308aa00cfdf67
Committed by: Linus Torvalds
1 parent: c7ba5c9e81
Exists in master and in 4 other branches
Memory controller: add switch to control what type of pages to limit
Choose if we want cached pages to be accounted or not. By default both
are accounted for. A new tunable, mem_control_type, is added:

    echo -n 1 > mem_control_type

switches the accounting to account for only mapped pages, and

    echo -n 3 > mem_control_type

switches the behaviour back.

[bunk@kernel.org: mm/memcontrol.c: cleanups]
[akpm@linux-foundation.org: fix sparc32 build]
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
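For illustration only, here is a minimal user-space sketch in C of driving the new tunable. The /cgroups mount point is an assumption, and the bare mem_control_type file name is taken from the changelog (the cgroup core may expose it with a subsystem prefix); adjust both to the actual setup:

    /* Sketch, not part of the patch: switch one cgroup to mapped-only
     * accounting. Path and file name are assumptions; see above. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/cgroups/mem_control_type", O_WRONLY);

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	/* "1" = account mapped pages only; "3" = mapped + page cache */
    	if (write(fd, "1", 1) != 1)
    		perror("write");
    	close(fd);
    	return 0;
    }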
Showing 4 changed files with 106 additions and 5 deletions
include/linux/memcontrol.h
... | ... | @@ -22,6 +22,8 @@ |
22 | 22 | |
23 | 23 | struct mem_cgroup; |
24 | 24 | struct page_cgroup; |
25 | +struct page; | |
26 | +struct mm_struct; | |
25 | 27 | |
26 | 28 | #ifdef CONFIG_CGROUP_MEM_CONT |
27 | 29 | |
... | ... | @@ -40,6 +42,7 @@ |
40 | 42 | struct mem_cgroup *mem_cont, |
41 | 43 | int active); |
42 | 44 | extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); |
45 | +extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm); | |
43 | 46 | |
44 | 47 | static inline void mem_cgroup_uncharge_page(struct page *page) |
45 | 48 | { |
... | ... | @@ -82,6 +85,12 @@ |
82 | 85 | static inline void mem_cgroup_move_lists(struct page_cgroup *pc, |
83 | 86 | bool active) |
84 | 87 | { |
88 | +} | |
89 | + | |
90 | +static inline int mem_cgroup_cache_charge(struct page *page, | |
91 | + struct mm_struct *mm) | |
92 | +{ | |
93 | + return 0; | |
85 | 94 | } |
86 | 95 | |
87 | 96 | #endif /* CONFIG_CGROUP_MEM_CONT */ |
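Note the shape of the !CONFIG_CGROUP_MEM_CONT side: mem_cgroup_cache_charge() becomes a static inline that returns 0 (success), so call sites in the page-cache path compile unchanged and charging never fails when the controller is built out. The new struct page and struct mm_struct forward declarations keep the header self-contained in both configurations.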
mm/filemap.c
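The mm/filemap.c hunk is not reproduced here. Judging from the API added above, the expected change is in the page-cache insertion path, swapping the unconditional charge for the cache-aware wrapper; a sketch of the likely hunk, not the verbatim diff:

    -	error = mem_cgroup_charge(page, current->mm);
    +	error = mem_cgroup_cache_charge(page, current->mm);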
mm/memcontrol.c
... | ... | @@ -29,6 +29,8 @@ |
29 | 29 | #include <linux/spinlock.h> |
30 | 30 | #include <linux/fs.h> |
31 | 31 | |
32 | +#include <asm/uaccess.h> | |
33 | + | |
32 | 34 | struct cgroup_subsys mem_cgroup_subsys; |
33 | 35 | static const int MEM_CGROUP_RECLAIM_RETRIES = 5; |
34 | 36 | |
... | ... | @@ -60,6 +62,7 @@ |
60 | 62 | * spin_lock to protect the per cgroup LRU |
61 | 63 | */ |
62 | 64 | spinlock_t lru_lock; |
65 | + unsigned long control_type; /* control RSS or RSS+Pagecache */ | |
63 | 66 | }; |
64 | 67 | |
65 | 68 | /* |
66 | 69 | |
... | ... | @@ -82,7 +85,16 @@ |
82 | 85 | /* mapped and cached states */ |
83 | 86 | }; |
84 | 87 | |
88 | +enum { | |
89 | + MEM_CGROUP_TYPE_UNSPEC = 0, | |
90 | + MEM_CGROUP_TYPE_MAPPED, | |
91 | + MEM_CGROUP_TYPE_CACHED, | |
92 | + MEM_CGROUP_TYPE_ALL, | |
93 | + MEM_CGROUP_TYPE_MAX, | |
94 | +}; | |
85 | 95 | |
96 | +static struct mem_cgroup init_mem_cgroup; | |
97 | + | |
86 | 98 | static inline |
87 | 99 | struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) |
88 | 100 | { |
89 | 101 | |
90 | 102 | |
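Reading the changelog's echo values against this enum: 1 is MEM_CGROUP_TYPE_MAPPED (account mapped pages only) and 3 is MEM_CGROUP_TYPE_ALL (account mapped and cached pages). MEM_CGROUP_TYPE_UNSPEC and MEM_CGROUP_TYPE_MAX bracket the range that the write handler below will accept.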
... | ... | @@ -139,18 +151,18 @@ |
139 | 151 | (page->page_cgroup & ~PAGE_CGROUP_LOCK); |
140 | 152 | } |
141 | 153 | |
142 | -void __always_inline lock_page_cgroup(struct page *page) | |
154 | +static void __always_inline lock_page_cgroup(struct page *page) | |
143 | 155 | { |
144 | 156 | bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); |
145 | 157 | VM_BUG_ON(!page_cgroup_locked(page)); |
146 | 158 | } |
147 | 159 | |
148 | -void __always_inline unlock_page_cgroup(struct page *page) | |
160 | +static void __always_inline unlock_page_cgroup(struct page *page) | |
149 | 161 | { |
150 | 162 | bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); |
151 | 163 | } |
152 | 164 | |
153 | -void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active) | |
165 | +static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active) | |
154 | 166 | { |
155 | 167 | if (active) |
156 | 168 | list_move(&pc->lru, &pc->mem_cgroup->active_list); |
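The three helpers above only gain a static qualifier; they have no users outside this file, so this is namespace hygiene rather than a behavioural change (plausibly the mm/memcontrol.c cleanups credited to bunk@kernel.org in the changelog).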
... | ... | @@ -366,6 +378,22 @@ |
366 | 378 | } |
367 | 379 | |
368 | 380 | /* |
381 | + * See if the cached pages should be charged at all? | |
382 | + */ | |
383 | +int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm) | |
384 | +{ | |
385 | + struct mem_cgroup *mem; | |
386 | + if (!mm) | |
387 | + mm = &init_mm; | |
388 | + | |
389 | + mem = rcu_dereference(mm->mem_cgroup); | |
390 | + if (mem->control_type == MEM_CGROUP_TYPE_ALL) | |
391 | + return mem_cgroup_charge(page, mm); | |
392 | + else | |
393 | + return 0; | |
394 | +} | |
395 | + | |
396 | +/* | |
369 | 397 | * Uncharging is always a welcome operation, we never complain, simply |
370 | 398 | * uncharge. |
371 | 399 | */ |
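Two design points in mem_cgroup_cache_charge() are worth noting: a NULL mm (a kernel-initiated page-cache fill) is accounted against init_mm's cgroup, and the charge happens only when the owning cgroup's control_type is MEM_CGROUP_TYPE_ALL, so cache pages go untracked in mapped-only mode. That is exactly why the next hunk documents that uncharge must cope with a page that was never charged: the admin may flip control_type while such pages are in flight.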
... | ... | @@ -375,6 +403,10 @@ |
375 | 403 | struct page *page; |
376 | 404 | unsigned long flags; |
377 | 405 | |
406 | + /* | |
407 | + * This can handle cases when a page is not charged at all and we | |
408 | + * are switching between handling the control_type. | |
409 | + */ | |
378 | 410 | if (!pc) |
379 | 411 | return; |
380 | 412 | |
... | ... | @@ -425,6 +457,60 @@ |
425 | 457 | mem_cgroup_write_strategy); |
426 | 458 | } |
427 | 459 | |
460 | +static ssize_t mem_control_type_write(struct cgroup *cont, | |
461 | + struct cftype *cft, struct file *file, | |
462 | + const char __user *userbuf, | |
463 | + size_t nbytes, loff_t *pos) | |
464 | +{ | |
465 | + int ret; | |
466 | + char *buf, *end; | |
467 | + unsigned long tmp; | |
468 | + struct mem_cgroup *mem; | |
469 | + | |
470 | + mem = mem_cgroup_from_cont(cont); | |
471 | + buf = kmalloc(nbytes + 1, GFP_KERNEL); | |
472 | + ret = -ENOMEM; | |
473 | + if (buf == NULL) | |
474 | + goto out; | |
475 | + | |
476 | + buf[nbytes] = 0; | |
477 | + ret = -EFAULT; | |
478 | + if (copy_from_user(buf, userbuf, nbytes)) | |
479 | + goto out_free; | |
480 | + | |
481 | + ret = -EINVAL; | |
482 | + tmp = simple_strtoul(buf, &end, 10); | |
483 | + if (*end != '\0') | |
484 | + goto out_free; | |
485 | + | |
486 | + if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX) | |
487 | + goto out_free; | |
488 | + | |
489 | + mem->control_type = tmp; | |
490 | + ret = nbytes; | |
491 | +out_free: | |
492 | + kfree(buf); | |
493 | +out: | |
494 | + return ret; | |
495 | +} | |
496 | + | |
497 | +static ssize_t mem_control_type_read(struct cgroup *cont, | |
498 | + struct cftype *cft, | |
499 | + struct file *file, char __user *userbuf, | |
500 | + size_t nbytes, loff_t *ppos) | |
501 | +{ | |
502 | + unsigned long val; | |
503 | + char buf[64], *s; | |
504 | + struct mem_cgroup *mem; | |
505 | + | |
506 | + mem = mem_cgroup_from_cont(cont); | |
507 | + s = buf; | |
508 | + val = mem->control_type; | |
509 | + s += sprintf(s, "%lu\n", val); | |
510 | + return simple_read_from_buffer((void __user *)userbuf, nbytes, | |
511 | + ppos, buf, s - buf); | |
512 | +} | |
513 | + | |
428 | 514 | static struct cftype mem_cgroup_files[] = { |
429 | 515 | { |
430 | 516 | .name = "usage_in_bytes", |
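Two details of the write handler above deserve a call-out. First, the terminating-character check (*end != '\0') rejects any trailing byte, including the newline a plain echo appends, which is why the changelog uses echo -n. Second, the range check admits MEM_CGROUP_TYPE_CACHED (2), but since mem_cgroup_cache_charge() only charges cache pages when control_type is MEM_CGROUP_TYPE_ALL, writing 2 behaves the same as mapped-only accounting.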
... | ... | @@ -442,6 +528,11 @@ |
442 | 528 | .private = RES_FAILCNT, |
443 | 529 | .read = mem_cgroup_read, |
444 | 530 | }, |
531 | + { | |
532 | + .name = "control_type", | |
533 | + .write = mem_control_type_write, | |
534 | + .read = mem_control_type_read, | |
535 | + }, | |
445 | 536 | }; |
446 | 537 | |
447 | 538 | static struct mem_cgroup init_mem_cgroup; |
... | ... | @@ -464,6 +555,7 @@ |
464 | 555 | INIT_LIST_HEAD(&mem->active_list); |
465 | 556 | INIT_LIST_HEAD(&mem->inactive_list); |
466 | 557 | spin_lock_init(&mem->lru_lock); |
558 | + mem->control_type = MEM_CGROUP_TYPE_ALL; | |
467 | 559 | return &mem->css; |
468 | 560 | } |
469 | 561 |
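Finally, css allocation initializes control_type to MEM_CGROUP_TYPE_ALL, matching the changelog's default of accounting both mapped and cached pages.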