Commit 11c9ea4e80fc3be83485667204c68d0a732f3757

Authored by Johannes Weiner
Committed by Linus Torvalds
1 parent e7018b8d27

memcg: convert per-cpu stock from bytes to page granularity

We never keep subpage quantities in the per-cpu stock.
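
The change is mechanical: only whole pages ever enter the stock, so a page
counter carries the same information as the old byte counter, and the byte
value can be recomputed wherever res_counter still needs it. A minimal
userspace sketch of that invariant (illustrative only, not kernel code; a
PAGE_SHIFT of 12, i.e. 4 KiB pages, is an assumption):

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	struct stock_model {
		unsigned int nr_pages;		/* replaces "int charge" in bytes */
	};

	static unsigned long stock_bytes(const struct stock_model *s)
	{
		/* exact: the stock only ever holds whole pages */
		return (unsigned long)s->nr_pages * PAGE_SIZE;
	}

	int main(void)
	{
		struct stock_model s = { .nr_pages = 32 };	/* one CHARGE_SIZE batch */

		assert(stock_bytes(&s) == 32 * PAGE_SIZE);
		s.nr_pages--;			/* was: charge -= PAGE_SIZE */
		printf("%u pages = %lu bytes\n", s.nr_pages, stock_bytes(&s));
		return 0;
	}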

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file (mm/memcontrol.c) with 13 additions and 11 deletions

@@ -1650,14 +1650,14 @@
 #define CHARGE_SIZE (32 * PAGE_SIZE)
 struct memcg_stock_pcp {
 	struct mem_cgroup *cached; /* this never be root cgroup */
-	int charge;
+	unsigned int nr_pages;
 	struct work_struct work;
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
 static atomic_t memcg_drain_count;

 /*
- * Try to consume stocked charge on this cpu. If success, PAGE_SIZE is consumed
+ * Try to consume stocked charge on this cpu. If success, one page is consumed
  * from local stock and true is returned. If the stock is 0 or charges from a
  * cgroup which is not current target, returns false. This stock will be
  * refilled.
@@ -1668,8 +1668,8 @@
 	bool ret = true;

 	stock = &get_cpu_var(memcg_stock);
-	if (mem == stock->cached && stock->charge)
-		stock->charge -= PAGE_SIZE;
+	if (mem == stock->cached && stock->nr_pages)
+		stock->nr_pages--;
 	else /* need to call res_counter_charge */
 		ret = false;
 	put_cpu_var(memcg_stock);
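
The consume path above now hands out exactly one page per hit. A
single-threaded stand-in for it, with the get_cpu_var()/put_cpu_var() CPU
pinning omitted and all names local to this sketch:

	#include <stdbool.h>

	struct mem_cgroup;			/* opaque stand-in */

	struct memcg_stock_model {
		struct mem_cgroup *cached;
		unsigned int nr_pages;
	};

	static bool consume_stock_model(struct memcg_stock_model *stock,
					struct mem_cgroup *mem)
	{
		/* "stock->charge -= PAGE_SIZE" became "stock->nr_pages--":
		 * the same quantity, expressed in pages instead of bytes */
		if (mem == stock->cached && stock->nr_pages) {
			stock->nr_pages--;
			return true;
		}
		return false;	/* caller falls back to res_counter_charge() */
	}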
@@ -1683,13 +1683,15 @@
 {
 	struct mem_cgroup *old = stock->cached;

-	if (stock->charge) {
-		res_counter_uncharge(&old->res, stock->charge);
+	if (stock->nr_pages) {
+		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
+
+		res_counter_uncharge(&old->res, bytes);
 		if (do_swap_account)
-			res_counter_uncharge(&old->memsw, stock->charge);
+			res_counter_uncharge(&old->memsw, bytes);
+		stock->nr_pages = 0;
 	}
 	stock->cached = NULL;
-	stock->charge = 0;
 }

 /*
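
drain_stock() above is the one place the page count is converted back to
bytes, since res_counter_uncharge() still takes bytes. The unsigned long
PAGE_SIZE operand promotes the product, so the multiply cannot truncate for
any realistic stock size; a standalone round-trip check under the same
4 KiB-page assumption:

	#include <assert.h>

	#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	int main(void)
	{
		unsigned int nr_pages = 32;	/* one CHARGE_SIZE worth */
		unsigned long bytes = nr_pages * PAGE_SIZE;

		assert(bytes == 32 * PAGE_SIZE);
		assert((bytes >> PAGE_SHIFT) == nr_pages);	/* exact round-trip */
		return 0;
	}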
@@ -1706,7 +1708,7 @@
  * Cache charges(val) which is from res_counter, to local per_cpu area.
  * This will be consumed by consume_stock() function, later.
  */
-static void refill_stock(struct mem_cgroup *mem, int val)
+static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
 {
 	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

@@ -1714,7 +1716,7 @@
 		drain_stock(stock);
 		stock->cached = mem;
 	}
-	stock->charge += val;
+	stock->nr_pages += nr_pages;
 	put_cpu_var(memcg_stock);
 }

@@ -2012,7 +2014,7 @@
 	} while (ret != CHARGE_OK);

 	if (csize > page_size)
-		refill_stock(mem, csize - page_size);
+		refill_stock(mem, (csize - page_size) >> PAGE_SHIFT);
 	css_put(&mem->css);
 done:
 	*memcg = mem;
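
The caller still tracks csize and page_size in bytes, so the surplus is
shifted down to pages before refilling the stock. Both are whole-page
multiples, making the shift exact; a standalone check, again assuming
4 KiB pages, with names local to this sketch:

	#include <assert.h>

	#define PAGE_SHIFT  12			/* assumed 4 KiB pages */
	#define PAGE_SIZE   (1UL << PAGE_SHIFT)
	#define CHARGE_SIZE (32 * PAGE_SIZE)	/* batch size from the diff */

	int main(void)
	{
		unsigned long csize = CHARGE_SIZE;	/* bytes actually charged */
		unsigned long page_size = PAGE_SIZE;	/* bytes the caller needed */

		/* 31 surplus pages go back into the per-cpu stock */
		assert(((csize - page_size) >> PAGE_SHIFT) == 31);
		return 0;
	}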