Commit 2c26fdd70c3094fa3e84caf9ef434911933d5477
Committed by: Linus Torvalds
Parent: 887007561a
memcg: revert gfp mask fix
My patch, memcg-fix-gfp_mask-of-callers-of-charge.patch, changed the gfp_mask of the callers of charge to GFP_HIGHUSER_MOVABLE to show what will happen at memory reclaim. In recent discussion it was NACKed because it looks ugly.

This patch reverts that change and cleans up the gfp_mask of the callers of charge. There is no behavior change, but it needs review before generating a HUNK deep in the queue.

This patch also adds an explanation of the meaning of the gfp_mask passed to the charge functions to memcontrol.h.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 6 changed files with 25 additions and 18 deletions
include/linux/memcontrol.h
@@ -26,6 +26,16 @@
 struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions with gfp_mask should use GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
+ * not allocate memory but reclaims memory from all available zones, so the
+ * "where I want memory from" bits of gfp_mask have no meaning. Any bits of
+ * that field would work, but having a rule is better: a charge function's
+ * gfp_mask should be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK to
+ * avoid ambiguous code.
+ * (Of course, if memcg allocates memory in the future, GFP_KERNEL is sane.)
+ */
 
 extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
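The comment above states the convention this cleanup enforces: a charge call site either hardcodes GFP_KERNEL or filters its incoming mask through GFP_RECLAIM_MASK, so zone-placement bits never reach memcg. Below is a minimal sketch of a caller following that rule; my_cache_add() and its insertion step are invented for illustration, while mem_cgroup_cache_charge() and GFP_RECLAIM_MASK (defined in mm/internal.h) are the real kernel names of this era:

/* Hypothetical mm-internal helper sketching the convention; only the
 * charge call mirrors the real API, the rest is illustrative. */
static int my_cache_add(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask)
{
	int error;

	/*
	 * Keep only the reclaim-behavior bits (__GFP_WAIT, __GFP_IO,
	 * __GFP_FS, ...); placement bits such as __GFP_HIGHMEM and
	 * __GFP_MOVABLE mean nothing to memcg.
	 */
	error = mem_cgroup_cache_charge(page, mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		return error;

	/* ... insert the page into the cache, uncharging on failure ... */
	return 0;
}

A call site that has no gfp_mask of its own, like the memory.c and most of the shmem.c sites below, simply passes GFP_KERNEL.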
mm/filemap.c
mm/memcontrol.c
@@ -1248,7 +1248,7 @@
 	unlock_page_cgroup(pc);
 
 	if (mem) {
-		ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
+		ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
 		css_put(&mem->css);
 	}
 	*ptr = mem;
@@ -1378,7 +1378,7 @@
 			break;
 
 		progress = try_to_free_mem_cgroup_pages(memcg,
-						GFP_HIGHUSER_MOVABLE, false);
+						GFP_KERNEL, false);
 		if (!progress) retry_count--;
 	}
 	return ret;
@@ -1418,7 +1418,7 @@
 			break;
 
 		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-		try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
+		try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
 		if (curusage >= oldusage)
 			retry_count--;
@@ -1464,7 +1464,7 @@
 		}
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-		ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
+		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
 		if (ret == -ENOMEM)
 			break;
 
@@ -1550,7 +1550,7 @@
 			goto out;
 		}
 		progress = try_to_free_mem_cgroup_pages(mem,
-						GFP_HIGHUSER_MOVABLE, false);
+						GFP_KERNEL, false);
 		if (!progress) {
 			nr_retries--;
 			/* maybe some writeback is necessary */
mm/memory.c
@@ -2000,7 +2000,7 @@
 	cow_user_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
-	if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
+	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
 	/*
@@ -2431,8 +2431,7 @@
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-	if (mem_cgroup_try_charge_swapin(mm, page,
-				GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
+	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
 		ret = VM_FAULT_OOM;
 		unlock_page(page);
 		goto out;
@@ -2524,7 +2523,7 @@
 		goto oom;
 	__SetPageUptodate(page);
 
-	if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
+	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 		goto oom_free_page;
 
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -2615,8 +2614,7 @@
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
-		if (mem_cgroup_newpage_charge(page,
-					mm, GFP_HIGHUSER_MOVABLE)) {
+		if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 			ret = VM_FAULT_OOM;
 			page_cache_release(page);
 			goto out;
mm/shmem.c
@@ -932,8 +932,8 @@
 	 * Charge page using GFP_HIGHUSER_MOVABLE while we can wait.
 	 * charged back to the user(not to caller) when swap account is used.
 	 */
-	error = mem_cgroup_cache_charge_swapin(page,
-			current->mm, GFP_HIGHUSER_MOVABLE, true);
+	error = mem_cgroup_cache_charge_swapin(page, current->mm, GFP_KERNEL,
+					true);
 	if (error)
 		goto out;
 	error = radix_tree_preload(GFP_KERNEL);
@@ -1275,7 +1275,7 @@
 				 * charge against this swap cache here.
 				 */
 				if (mem_cgroup_cache_charge_swapin(swappage,
-						current->mm, gfp, false)) {
+					current->mm, gfp & GFP_RECLAIM_MASK, false)) {
 					page_cache_release(swappage);
 					error = -ENOMEM;
 					goto failed;
@@ -1393,7 +1393,7 @@
 
 	/* Precharge page while we can wait, compensate after */
 	error = mem_cgroup_cache_charge(filepage, current->mm,
-					GFP_HIGHUSER_MOVABLE);
+					GFP_KERNEL);
 	if (error) {
 		page_cache_release(filepage);
 		shmem_unacct_blocks(info->flags, 1);
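The shmem swap-cache path above is the one call site in this patch that keeps its caller-supplied gfp and masks it, rather than hardcoding GFP_KERNEL. The userspace sketch below, with gfp bit values modeled loosely on the 2.6.29-era headers and GFP_RECLAIM_MASK trimmed to the bits relevant here (an illustrative model, not kernel code), checks why the two spellings of the rule agree: masking the old GFP_HIGHUSER_MOVABLE down to its reclaim bits yields exactly GFP_KERNEL.

#include <assert.h>
#include <stdio.h>

/* Simplified gfp bits for illustration; the real definitions live in
 * include/linux/gfp.h and mm/internal.h. */
#define __GFP_HIGHMEM	0x02u		/* zone placement */
#define __GFP_MOVABLE	0x08u		/* zone placement */
#define __GFP_WAIT	0x10u		/* reclaim behavior */
#define __GFP_IO	0x40u		/* reclaim behavior */
#define __GFP_FS	0x80u		/* reclaim behavior */
#define __GFP_HARDWALL	0x20000u	/* cpuset enforcement */

#define GFP_KERNEL		(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_HIGHUSER_MOVABLE	(GFP_KERNEL | __GFP_HARDWALL | \
				 __GFP_HIGHMEM | __GFP_MOVABLE)

/* Trimmed to the relevant subset of mm/internal.h's GFP_RECLAIM_MASK. */
#define GFP_RECLAIM_MASK	(__GFP_WAIT | __GFP_IO | __GFP_FS)

int main(void)
{
	/* Placement and cpuset bits are stripped; only the reclaim-behavior
	 * bits survive, which for this input is exactly GFP_KERNEL. */
	assert((GFP_HIGHUSER_MOVABLE & GFP_RECLAIM_MASK) == GFP_KERNEL);
	printf("GFP_HIGHUSER_MOVABLE & GFP_RECLAIM_MASK == GFP_KERNEL\n");
	return 0;
}

This is why replacing GFP_HIGHUSER_MOVABLE with GFP_KERNEL at the other call sites is no behavior change for memcg.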
mm/swapfile.c
@@ -698,8 +698,7 @@
 	pte_t *pte;
 	int ret = 1;
 
-	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
-					GFP_HIGHUSER_MOVABLE, &ptr))
+	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
 		ret = -ENOMEM;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);