Commit c05555b572921c464d064d9267f7f7bc06d424fa

Authored by KAMEZAWA Hiroyuki
Committed by Linus Torvalds
1 parent addb9efebb

memcg: atomic ops for page_cgroup->flags

This patch converts page_cgroup->flags to atomic bit operations and
defines functions (and macros) to access it.

Before the memory resource controller is modified further, atomic
operations on flags are necessary.  Most of the flags in this patch are
for LRU state and are modified under mz->lru_lock, but flags that are
not related to LRU will be added soon.  For example, a LOCK bit will be
placed in the flags field, and the LRU bits must then be modified
atomically without taking that lock.
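
For illustration, a minimal userspace sketch of the accessor pattern
introduced below.  This is not the kernel code: the real patch uses
test_bit()/set_bit()/clear_bit() from <linux/bitops.h>; here GCC's
__atomic builtins stand in for those, and struct page_cgroup is
stripped down to just the flags word.

#include <stdio.h>

struct page_cgroup {
        unsigned long flags;            /* accessed with atomic bit ops */
};

enum {
        PCG_CACHE,              /* charged as cache */
        PCG_ACTIVE,             /* page is active in this cgroup */
        PCG_FILE,               /* page is file system backed */
        PCG_UNEVICTABLE,        /* page is unevictable */
};

/* Each macro stamps out one accessor, mirroring TESTPCGFLAG etc. */
#define TESTPCGFLAG(uname, lname)                                       \
static inline int PageCgroup##uname(struct page_cgroup *pc)             \
        { return (__atomic_load_n(&pc->flags, __ATOMIC_RELAXED)         \
                  >> PCG_##lname) & 1UL; }

#define SETPCGFLAG(uname, lname)                                        \
static inline void SetPageCgroup##uname(struct page_cgroup *pc)         \
        { __atomic_fetch_or(&pc->flags, 1UL << PCG_##lname,             \
                            __ATOMIC_RELAXED); }

#define CLEARPCGFLAG(uname, lname)                                      \
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)       \
        { __atomic_fetch_and(&pc->flags, ~(1UL << PCG_##lname),         \
                             __ATOMIC_RELAXED); }

TESTPCGFLAG(Active, ACTIVE)
SETPCGFLAG(Active, ACTIVE)
CLEARPCGFLAG(Active, ACTIVE)

int main(void)
{
        struct page_cgroup pc = { .flags = 0 };

        SetPageCgroupActive(&pc);       /* atomic RMW on one bit */
        printf("active=%d\n", PageCgroupActive(&pc));   /* active=1 */
        ClearPageCgroupActive(&pc);
        printf("active=%d\n", PageCgroupActive(&pc));   /* active=0 */
        return 0;
}

The point of the conversion: a plain pc->flags |= bit is a
read-modify-write that can lose a concurrent update to a different bit
of the same word, so it is only safe when every writer holds the same
lock.  Atomic set_bit()/clear_bit() make each single-bit update safe on
its own, which is what allows a future LOCK bit to share the flags word
with the LRU bits.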

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 82 additions and 40 deletions

... ... @@ -157,13 +157,47 @@
157 157 struct list_head lru; /* per cgroup LRU list */
158 158 struct page *page;
159 159 struct mem_cgroup *mem_cgroup;
160   - int flags;
  160 + unsigned long flags;
161 161 };
162   -#define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */
163   -#define PAGE_CGROUP_FLAG_ACTIVE (0x2) /* page is active in this cgroup */
164   -#define PAGE_CGROUP_FLAG_FILE (0x4) /* page is file system backed */
165   -#define PAGE_CGROUP_FLAG_UNEVICTABLE (0x8) /* page is unevictableable */
166 162  
  163 +enum {
  164 + /* flags for mem_cgroup */
  165 + PCG_CACHE, /* charged as cache */
  166 + /* flags for LRU placement */
  167 + PCG_ACTIVE, /* page is active in this cgroup */
  168 + PCG_FILE, /* page is file system backed */
  169 + PCG_UNEVICTABLE, /* page is unevictableable */
  170 +};
  171 +
  172 +#define TESTPCGFLAG(uname, lname) \
  173 +static inline int PageCgroup##uname(struct page_cgroup *pc) \
  174 + { return test_bit(PCG_##lname, &pc->flags); }
  175 +
  176 +#define SETPCGFLAG(uname, lname) \
  177 +static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
  178 + { set_bit(PCG_##lname, &pc->flags); }
  179 +
  180 +#define CLEARPCGFLAG(uname, lname) \
  181 +static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
  182 + { clear_bit(PCG_##lname, &pc->flags); }
  183 +
  184 +
  185 +/* Cache flag is set only once (at allocation) */
  186 +TESTPCGFLAG(Cache, CACHE)
  187 +
  188 +/* LRU management flags (from global-lru definition) */
  189 +TESTPCGFLAG(File, FILE)
  190 +SETPCGFLAG(File, FILE)
  191 +CLEARPCGFLAG(File, FILE)
  192 +
  193 +TESTPCGFLAG(Active, ACTIVE)
  194 +SETPCGFLAG(Active, ACTIVE)
  195 +CLEARPCGFLAG(Active, ACTIVE)
  196 +
  197 +TESTPCGFLAG(Unevictable, UNEVICTABLE)
  198 +SETPCGFLAG(Unevictable, UNEVICTABLE)
  199 +CLEARPCGFLAG(Unevictable, UNEVICTABLE)
  200 +
167 201 static int page_cgroup_nid(struct page_cgroup *pc)
168 202 {
169 203 return page_to_nid(pc->page);
... ... @@ -177,15 +211,25 @@
177 211 enum charge_type {
178 212 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
179 213 MEM_CGROUP_CHARGE_TYPE_MAPPED,
180   - MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
181 214 MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
  215 + MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
  216 + NR_CHARGE_TYPE,
182 217 };
183 218  
  219 +static const unsigned long
  220 +pcg_default_flags[NR_CHARGE_TYPE] = {
  221 + ((1 << PCG_CACHE) | (1 << PCG_FILE)),
  222 + ((1 << PCG_ACTIVE)),
  223 + ((1 << PCG_ACTIVE) | (1 << PCG_CACHE)),
  224 + 0,
  225 +};
  226 +
184 227 /*
185 228 * Always modified under lru lock. Then, not necessary to preempt_disable()
186 229 */
187   -static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
188   - bool charge)
  230 +static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
  231 + struct page_cgroup *pc,
  232 + bool charge)
189 233 {
190 234 int val = (charge)? 1 : -1;
191 235 struct mem_cgroup_stat *stat = &mem->stat;
... ... @@ -194,7 +238,7 @@
194 238 VM_BUG_ON(!irqs_disabled());
195 239  
196 240 cpustat = &stat->cpustat[smp_processor_id()];
197   - if (flags & PAGE_CGROUP_FLAG_CACHE)
  241 + if (PageCgroupCache(pc))
198 242 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
199 243 else
200 244 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
... ... @@ -295,18 +339,18 @@
295 339 {
296 340 int lru = LRU_BASE;
297 341  
298   - if (pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE)
  342 + if (PageCgroupUnevictable(pc))
299 343 lru = LRU_UNEVICTABLE;
300 344 else {
301   - if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
  345 + if (PageCgroupActive(pc))
302 346 lru += LRU_ACTIVE;
303   - if (pc->flags & PAGE_CGROUP_FLAG_FILE)
  347 + if (PageCgroupFile(pc))
304 348 lru += LRU_FILE;
305 349 }
306 350  
307 351 MEM_CGROUP_ZSTAT(mz, lru) -= 1;
308 352  
309   - mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
  353 + mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
310 354 list_del(&pc->lru);
311 355 }
... ... @@ -315,27 +359,27 @@
315 359 {
316 360 int lru = LRU_BASE;
317 361  
318   - if (pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE)
  362 + if (PageCgroupUnevictable(pc))
319 363 lru = LRU_UNEVICTABLE;
320 364 else {
321   - if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
  365 + if (PageCgroupActive(pc))
322 366 lru += LRU_ACTIVE;
323   - if (pc->flags & PAGE_CGROUP_FLAG_FILE)
  367 + if (PageCgroupFile(pc))
324 368 lru += LRU_FILE;
325 369 }
326 370  
327 371 MEM_CGROUP_ZSTAT(mz, lru) += 1;
328 372 list_add(&pc->lru, &mz->lists[lru]);
329 373  
330   - mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
  374 + mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
331 375 }
332 376  
333 377 static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
334 378 {
335 379 struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
336   - int active = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
337   - int file = pc->flags & PAGE_CGROUP_FLAG_FILE;
338   - int unevictable = pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE;
  380 + int active = PageCgroupActive(pc);
  381 + int file = PageCgroupFile(pc);
  382 + int unevictable = PageCgroupUnevictable(pc);
339 383 enum lru_list from = unevictable ? LRU_UNEVICTABLE :
340 384 (LRU_FILE * !!file + !!active);
... ... @@ -343,16 +387,20 @@
343 387 return;
344 388  
345 389 MEM_CGROUP_ZSTAT(mz, from) -= 1;
346   -
  390 + /*
  391 + * However this is done under mz->lru_lock, another flags, which
  392 + * are not related to LRU, will be modified from out-of-lock.
  393 + * We have to use atomic set/clear flags.
  394 + */
347 395 if (is_unevictable_lru(lru)) {
348   - pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
349   - pc->flags |= PAGE_CGROUP_FLAG_UNEVICTABLE;
  396 + ClearPageCgroupActive(pc);
  397 + SetPageCgroupUnevictable(pc);
350 398 } else {
351 399 if (is_active_lru(lru))
352   - pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
  400 + SetPageCgroupActive(pc);
353 401 else
354   - pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
355   - pc->flags &= ~PAGE_CGROUP_FLAG_UNEVICTABLE;
  402 + ClearPageCgroupActive(pc);
  403 + ClearPageCgroupUnevictable(pc);
356 404 }
357 405  
358 406 MEM_CGROUP_ZSTAT(mz, lru) += 1;
... ... @@ -589,16 +637,7 @@
589 637 * If a page is accounted as a page cache, insert to inactive list.
590 638 * If anon, insert to active list.
591 639 */
592   - if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) {
593   - pc->flags = PAGE_CGROUP_FLAG_CACHE;
594   - if (page_is_file_cache(page))
595   - pc->flags |= PAGE_CGROUP_FLAG_FILE;
596   - else
597   - pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
598   - } else if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
599   - pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
600   - else /* MEM_CGROUP_CHARGE_TYPE_SHMEM */
601   - pc->flags = PAGE_CGROUP_FLAG_CACHE | PAGE_CGROUP_FLAG_ACTIVE;
  640 + pc->flags = pcg_default_flags[ctype];
602 641  
603 642 lock_page_cgroup(page);
604 643 if (unlikely(page_get_page_cgroup(page))) {
... ... @@ -677,8 +716,12 @@
677 716 if (unlikely(!mm))
678 717 mm = &init_mm;
679 718  
680   - return mem_cgroup_charge_common(page, mm, gfp_mask,
  719 + if (page_is_file_cache(page))
  720 + return mem_cgroup_charge_common(page, mm, gfp_mask,
681 721 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
  722 + else
  723 + return mem_cgroup_charge_common(page, mm, gfp_mask,
  724 + MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
682 725 }
683 726  
684 727 /*
... ... @@ -706,8 +749,7 @@
706 749 VM_BUG_ON(pc->page != page);
707 750  
708 751 if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
709   - && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
710   - || page_mapped(page)))
  752 + && ((PageCgroupCache(pc) || page_mapped(page))))
711 753 goto unlock;
712 754  
713 755 mz = page_cgroup_zoneinfo(pc);
... ... @@ -758,7 +800,7 @@
758 800 if (pc) {
759 801 mem = pc->mem_cgroup;
760 802 css_get(&mem->css);
761   - if (pc->flags & PAGE_CGROUP_FLAG_CACHE) {
  803 + if (PageCgroupCache(pc)) {
762 804 if (page_is_file_cache(page))
763 805 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
764 806 else