Commit 430e48631e72aeab74d844c57b441f98a2e36eee

Authored by KAMEZAWA Hiroyuki
Committed by Linus Torvalds
1 parent c62b1a3b31

memcg: update threshold and softlimit at commit

Presently, move_task() does a "batched" precharge.  Because res_counter and
css refcount operations do not scale well for memcg, try_charge() tends to be
done in a batched manner when allowed.

Now, softlimit and threshold check their event counter in try_charge(), but a
charge there is not a per-page event, and the event counter is not updated at
charge().  Moreover, precharge doesn't pass a "page" to try_charge(), so the
softlimit tree is never updated until uncharge() causes an event.

So the best place to check the event counter is commit_charge(), which is a
per-page event by its nature.  This patch moves the checks there.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
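
To make the rationale concrete, here is a minimal, self-contained toy model in
plain userspace C (the toy_* names and the THRESHOLD_EVENTS value are
illustrative assumptions, not kernel code): a batched precharge accounts
several pages in one call and so cannot drive a per-page event counter, while
the per-page commit can.

#include <stdio.h>
#include <stdbool.h>

#define THRESHOLD_EVENTS 4	/* arbitrary demo value */

struct toy_memcg {
	long charged_pages;	/* stands in for res_counter usage */
	long events;		/* stands in for the memcg event counter */
};

/* Batched precharge: one call may account many pages at once,
 * so it has no sensible place for a per-page event check. */
static void toy_try_charge(struct toy_memcg *m, long nr_pages)
{
	m->charged_pages += nr_pages;
}

/* Per-page commit: exactly one page per call, so the event counter
 * is accurate here and threshold/softlimit checks can key off it. */
static bool toy_commit_charge(struct toy_memcg *m)
{
	m->events++;
	return (m->events % THRESHOLD_EVENTS) == 0;	/* "threshold check" */
}

int main(void)
{
	struct toy_memcg m = { 0, 0 };
	long page;

	toy_try_charge(&m, 8);			/* batched precharge of 8 pages */
	for (page = 0; page < 8; page++)	/* commit one page at a time */
		if (toy_commit_charge(&m))
			printf("check fires after page %ld\n", page);

	printf("charged=%ld events=%ld\n", m.charged_pages, m.events);
	return 0;
}

In this toy run the eight-page batch still yields eight commit events, so every
page is seen by the checks; had the check stayed in the batched charge path,
the whole batch would have looked like a single event.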

Showing 1 changed file (mm/memcontrol.c) with 18 additions and 20 deletions

... ... @@ -1424,8 +1424,7 @@
1424 1424 * oom-killer can be invoked.
1425 1425 */
1426 1426 static int __mem_cgroup_try_charge(struct mm_struct *mm,
1427   - gfp_t gfp_mask, struct mem_cgroup **memcg,
1428   - bool oom, struct page *page)
  1427 + gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
1429 1428 {
1430 1429 struct mem_cgroup *mem, *mem_over_limit;
1431 1430 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
... ... @@ -1463,7 +1462,7 @@
1463 1462 unsigned long flags = 0;
1464 1463  
1465 1464 if (consume_stock(mem))
1466   - goto charged;
  1465 + goto done;
1467 1466  
1468 1467 ret = res_counter_charge(&mem->res, csize, &fail_res);
1469 1468 if (likely(!ret)) {
1470 1469  
... ... @@ -1558,16 +1557,7 @@
1558 1557 }
1559 1558 if (csize > PAGE_SIZE)
1560 1559 refill_stock(mem, csize - PAGE_SIZE);
1561   -charged:
1562   - /*
1563   - * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
1564   - * if they exceeds softlimit.
1565   - */
1566   - if (page && mem_cgroup_soft_limit_check(mem))
1567   - mem_cgroup_update_tree(mem, page);
1568 1560 done:
1569   - if (mem_cgroup_threshold_check(mem))
1570   - mem_cgroup_threshold(mem);
1571 1561 return 0;
1572 1562 nomem:
1573 1563 css_put(&mem->css);
... ... @@ -1691,6 +1681,16 @@
1691 1681 mem_cgroup_charge_statistics(mem, pc, true);
1692 1682  
1693 1683 unlock_page_cgroup(pc);
  1684 + /*
  1685 + * "charge_statistics" updated event counter. Then, check it.
  1686 + * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
  1687 + * if they exceeds softlimit.
  1688 + */
  1689 + if (mem_cgroup_soft_limit_check(mem))
  1690 + mem_cgroup_update_tree(mem, pc->page);
  1691 + if (mem_cgroup_threshold_check(mem))
  1692 + mem_cgroup_threshold(mem);
  1693 +
1694 1694 }
1695 1695  
1696 1696 /**
... ... @@ -1788,7 +1788,7 @@
1788 1788 goto put;
1789 1789  
1790 1790 parent = mem_cgroup_from_cont(pcg);
1791   - ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
  1791 + ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1792 1792 if (ret || !parent)
1793 1793 goto put_back;
1794 1794  
... ... @@ -1824,7 +1824,7 @@
1824 1824 prefetchw(pc);
1825 1825  
1826 1826 mem = memcg;
1827   - ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
  1827 + ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1828 1828 if (ret || !mem)
1829 1829 return ret;
1830 1830  
1831 1831  
... ... @@ -1944,14 +1944,14 @@
1944 1944 if (!mem)
1945 1945 goto charge_cur_mm;
1946 1946 *ptr = mem;
1947   - ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
  1947 + ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
1948 1948 /* drop extra refcnt from tryget */
1949 1949 css_put(&mem->css);
1950 1950 return ret;
1951 1951 charge_cur_mm:
1952 1952 if (unlikely(!mm))
1953 1953 mm = &init_mm;
1954   - return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
  1954 + return __mem_cgroup_try_charge(mm, mask, ptr, true);
1955 1955 }
1956 1956  
1957 1957 static void
... ... @@ -2340,8 +2340,7 @@
2340 2340 unlock_page_cgroup(pc);
2341 2341  
2342 2342 if (mem) {
2343   - ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
2344   - page);
  2343 + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
2345 2344 css_put(&mem->css);
2346 2345 }
2347 2346 *ptr = mem;
... ... @@ -3872,8 +3871,7 @@
3872 3871 batch_count = PRECHARGE_COUNT_AT_ONCE;
3873 3872 cond_resched();
3874 3873 }
3875   - ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem,
3876   - false, NULL);
  3874 + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
3877 3875 if (ret || !mem)
3878 3876 /* mem_cgroup_clear_mc() will do uncharge later */
3879 3877 return -ENOMEM;