Commit f8d665422603ee1b8ed04dcad4242f14d623c941

Authored by Hirokazu Takahashi
Committed by Linus Torvalds
1 parent 08e552c69c

memcg: add mem_cgroup_disabled()

We currently check whether the memory cgroup is disabled by testing
mem_cgroup_subsys.disabled directly, and that test now appears in more
places than expected.

Replacing
   if (mem_cgroup_subsys.disabled)
with
   if (mem_cgroup_disabled())

makes the code read better and keeps the check behind one helper.

[kamezawa.hiroyu@jp.fujitsu.com: fix typo]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
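
For context, the sketch below is a minimal, self-contained C rendering of the wrapper pattern this commit introduces. It is not part of the commit: struct cgroup_subsys here is a tiny stand-in for the kernel's real structure, and mem_cgroup_example_hook() is a hypothetical caller added only for illustration; the real definitions live in the files changed below.

/* Illustrative userspace sketch only, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct cgroup_subsys {
        int disabled;   /* set when the controller is disabled, e.g. via cgroup_disable=memory */
};

static struct cgroup_subsys mem_cgroup_subsys;

/*
 * CONFIG_CGROUP_MEM_RES_CTLR=y variant: consult the flag.
 * (The compiled-out variant in memcontrol.h simply returns true.)
 */
static inline bool mem_cgroup_disabled(void)
{
        if (mem_cgroup_subsys.disabled)
                return true;
        return false;
}

/* Hypothetical caller: test the helper instead of poking the struct member. */
static void mem_cgroup_example_hook(void)
{
        if (mem_cgroup_disabled())
                return;
        puts("memcg enabled: do the accounting work");
}

int main(void)
{
        mem_cgroup_example_hook();      /* accounting branch runs */
        mem_cgroup_subsys.disabled = 1;
        mem_cgroup_example_hook();      /* bails out early */
        return 0;
}

Because the compiled-out stub returns true unconditionally, callers guarded by mem_cgroup_disabled() take their early return when the controller is not configured, and the compiler can drop the memcg-specific code behind the check.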

Showing 3 changed files with 30 additions and 17 deletions

include/linux/memcontrol.h
@@ -19,7 +19,7 @@
 
 #ifndef _LINUX_MEMCONTROL_H
 #define _LINUX_MEMCONTROL_H
-
+#include <linux/cgroup.h>
 struct mem_cgroup;
 struct page_cgroup;
 struct page;
@@ -87,6 +87,14 @@
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
+
+static inline bool mem_cgroup_disabled(void)
+{
+        if (mem_cgroup_subsys.disabled)
+                return true;
+        return false;
+}
+
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct mem_cgroup;
 
@@ -213,6 +221,11 @@
                                         enum lru_list lru)
 {
         return 0;
+}
+
+static inline bool mem_cgroup_disabled(void)
+{
+        return true;
 }
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
mm/memcontrol.c
@@ -279,7 +279,7 @@
         struct mem_cgroup *mem;
         struct mem_cgroup_per_zone *mz;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return;
         pc = lookup_page_cgroup(page);
         /* can happen while we handle swapcache. */
@@ -302,7 +302,7 @@
         struct mem_cgroup_per_zone *mz;
         struct page_cgroup *pc;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return;
 
         pc = lookup_page_cgroup(page);
@@ -319,7 +319,7 @@
         struct page_cgroup *pc;
         struct mem_cgroup_per_zone *mz;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return;
         pc = lookup_page_cgroup(page);
         /* barrier to sync with "charge" */
@@ -344,7 +344,7 @@
 void mem_cgroup_move_lists(struct page *page,
                            enum lru_list from, enum lru_list to)
 {
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return;
         mem_cgroup_del_lru_list(page, from);
         mem_cgroup_add_lru_list(page, to);
@@ -731,7 +731,7 @@
 int mem_cgroup_newpage_charge(struct page *page,
                               struct mm_struct *mm, gfp_t gfp_mask)
 {
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return 0;
         if (PageCompound(page))
                 return 0;
@@ -753,7 +753,7 @@
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                 gfp_t gfp_mask)
 {
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return 0;
         if (PageCompound(page))
                 return 0;
@@ -799,7 +799,7 @@
         struct mem_cgroup *mem;
         swp_entry_t ent;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return 0;
 
         if (!do_swap_account)
@@ -833,7 +833,7 @@
 {
         int ret = 0;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return 0;
         if (unlikely(!mm))
                 mm = &init_mm;
@@ -880,7 +880,7 @@
 {
         struct page_cgroup *pc;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return;
         if (!ptr)
                 return;
@@ -909,7 +909,7 @@
 
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 {
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return;
         if (!mem)
                 return;
@@ -930,7 +930,7 @@
         struct mem_cgroup *mem = NULL;
         struct mem_cgroup_per_zone *mz;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return NULL;
 
         if (PageSwapCache(page))
@@ -1049,7 +1049,7 @@
         struct mem_cgroup *mem = NULL;
         int ret = 0;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return 0;
 
         pc = lookup_page_cgroup(page);
@@ -1131,7 +1131,7 @@
         int progress = 0;
         int retry = MEM_CGROUP_RECLAIM_RETRIES;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return 0;
         if (!mm)
                 return 0;
@@ -1697,7 +1697,7 @@
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 static void __init enable_swap_cgroup(void)
 {
-        if (!mem_cgroup_subsys.disabled && really_do_swap_account)
+        if (!mem_cgroup_disabled() && really_do_swap_account)
                 do_swap_account = 1;
 }
 #else
mm/page_cgroup.c
@@ -74,7 +74,7 @@
 
         int nid, fail;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return;
 
         for_each_online_node(nid) {
@@ -247,7 +247,7 @@
         unsigned long pfn;
         int fail = 0;
 
-        if (mem_cgroup_subsys.disabled)
+        if (mem_cgroup_disabled())
                 return;
 
         for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {