Commit d8046582d5ee24448800e71c6933fdb6813aa062
Committed by: Linus Torvalds
Parent: cdec2e4265
Exists in: master and 7 other branches
memcg: make memcg's file mapped consistent with global VM
In the global VM, FILE_MAPPED is used, but memcg uses MAPPED_FILE. This makes
grepping difficult, so replace memcg's MAPPED_FILE with FILE_MAPPED.

Also, in the global VM, mapped shared memory is accounted into FILE_MAPPED,
but memcg does not account it. Fix that as well.

Note: page_is_file_cache() just checks SwapBacked or not, so we need to check
PageAnon instead.

Cc: Balbir Singh <balbir@in.ibm.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
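As the note above says, page_is_file_cache() only tests SwapBacked, so shmem/tmpfs pages (swap-backed yet file-backed, not anonymous) were skipped by the old check even though the global VM counts them in NR_FILE_MAPPED. The standalone sketch below is not kernel code; the struct and flag fields are made up purely to show which page types each predicate accepts.

        /* Toy model of the predicate change; not kernel code. */
        #include <stdio.h>
        #include <stdbool.h>

        struct page_model {
        	const char *kind;
        	bool anon;		/* models PageAnon()       */
        	bool swap_backed;	/* models PageSwapBacked() */
        	bool mapped;		/* models page_mapped()    */
        };

        /* Old check: page_is_file_cache() boils down to !PageSwapBacked(). */
        static bool old_check(const struct page_model *p)
        {
        	return p->mapped && !p->swap_backed;
        }

        /* New check from this commit: mapped and not anonymous. */
        static bool new_check(const struct page_model *p)
        {
        	return p->mapped && !p->anon;
        }

        int main(void)
        {
        	const struct page_model pages[] = {
        		{ "regular file page", false, false, true },
        		{ "anonymous page",    true,  true,  true },
        		/* shmem/tmpfs: swap-backed, yet file-backed (not anon) */
        		{ "shmem/tmpfs page",  false, true,  true },
        	};

        	for (size_t i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
        		printf("%-18s old=%d new=%d\n", pages[i].kind,
        		       old_check(&pages[i]), new_check(&pages[i]));

        	/* Only the shmem row differs: old=0, new=1 -- the accounting fix. */
        	return 0;
        }

Only the shmem/tmpfs row changes between the two checks, which is exactly the inconsistency this patch removes.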
3 changed files with 13 additions and 16 deletions
include/linux/memcontrol.h
@@ -122,7 +122,7 @@
 }
 
 extern bool mem_cgroup_oom_called(struct task_struct *task);
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
+void mem_cgroup_update_file_mapped(struct page *page, int val);
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 						gfp_t gfp_mask, int nid,
 						int zid);
@@ -287,7 +287,7 @@
 {
 }
 
-static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
+static inline void mem_cgroup_update_file_mapped(struct page *page,
 							int val)
 {
 }
mm/memcontrol.c
@@ -67,7 +67,7 @@
 	 */
 	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
 	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
-	MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
+	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
 	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
 	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
 	MEM_CGROUP_STAT_EVENTS,	/* sum of pagein + pageout for internal use */
@@ -1227,7 +1227,7 @@
  * Currently used to update mapped file statistics, but the routine can be
  * generalized to update other statistics as well.
  */
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+void mem_cgroup_update_file_mapped(struct page *page, int val)
 {
 	struct mem_cgroup *mem;
 	struct mem_cgroup_stat *stat;
@@ -1235,9 +1235,6 @@
 	int cpu;
 	struct page_cgroup *pc;
 
-	if (!page_is_file_cache(page))
-		return;
-
 	pc = lookup_page_cgroup(page);
 	if (unlikely(!pc))
 		return;
@@ -1257,7 +1254,7 @@
 	stat = &mem->stat;
 	cpustat = &stat->cpustat[cpu];
 
-	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val);
 done:
 	unlock_page_cgroup(pc);
 }
 
 
@@ -1654,18 +1651,18 @@
 	mem_cgroup_charge_statistics(from, pc, false);
 
 	page = pc->page;
-	if (page_is_file_cache(page) && page_mapped(page)) {
+	if (page_mapped(page) && !PageAnon(page)) {
 		cpu = smp_processor_id();
 		/* Update mapped_file data for mem_cgroup "from" */
 		stat = &from->stat;
 		cpustat = &stat->cpustat[cpu];
-		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
 						-1);
 
 		/* Update mapped_file data for mem_cgroup "to" */
 		stat = &to->stat;
 		cpustat = &stat->cpustat[cpu];
-		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
 						1);
 	}
 
@@ -2889,7 +2886,7 @@
 enum {
 	MCS_CACHE,
 	MCS_RSS,
-	MCS_MAPPED_FILE,
+	MCS_FILE_MAPPED,
 	MCS_PGPGIN,
 	MCS_PGPGOUT,
 	MCS_SWAP,
@@ -2933,8 +2930,8 @@
 	s->stat[MCS_CACHE] += val * PAGE_SIZE;
 	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
 	s->stat[MCS_RSS] += val * PAGE_SIZE;
-	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
-	s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
+	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED);
+	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
 	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
 	s->stat[MCS_PGPGIN] += val;
 	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
mm/rmap.c
@@ -721,7 +721,7 @@
 {
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_mapped_file_stat(page, 1);
+		mem_cgroup_update_file_mapped(page, 1);
 	}
 }
 
 
@@ -753,8 +753,8 @@
 		__dec_zone_page_state(page, NR_ANON_PAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
+		mem_cgroup_update_file_mapped(page, -1);
 	}
-	mem_cgroup_update_mapped_file_stat(page, -1);
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap