Commit b85a96c0b6cb79c67e7b01b66368f2e31579d7c5

Authored by Daisuke Nishimura
Committed by Linus Torvalds
1 parent f9717d28d6

memcg: memory swap controller: fix limit check

There are scattered calls of res_counter_check_under_limit(), and most of
them don't take mem+swap accounting into account.

Define mem_cgroup_check_under_limit() and avoid direct use of
res_counter_check_under_limit().

Reported-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 17 additions and 9 deletions

@@ -571,6 +571,18 @@
         return ret;
 }
 
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+{
+        if (do_swap_account) {
+                if (res_counter_check_under_limit(&mem->res) &&
+                        res_counter_check_under_limit(&mem->memsw))
+                        return true;
+        } else
+                if (res_counter_check_under_limit(&mem->res))
+                        return true;
+        return false;
+}
+
 /*
  * Dance down the hierarchy if needed to reclaim memory. We remember the
  * last child we reclaimed from, so that we don't end up penalizing
@@ -592,7 +604,7 @@
          * have left.
          */
         ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
-        if (res_counter_check_under_limit(&root_mem->res))
+        if (mem_cgroup_check_under_limit(root_mem))
                 return 0;
 
         next_mem = mem_cgroup_get_first_node(root_mem);
@@ -606,7 +618,7 @@
                         continue;
                 }
                 ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
-                if (res_counter_check_under_limit(&root_mem->res))
+                if (mem_cgroup_check_under_limit(root_mem))
                         return 0;
                 cgroup_lock();
                 next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
@@ -709,12 +721,8 @@
                  * current usage of the cgroup before giving up
                  *
                  */
-                if (do_swap_account) {
-                        if (res_counter_check_under_limit(&mem_over_limit->res) &&
-                                res_counter_check_under_limit(&mem_over_limit->memsw))
-                                continue;
-                } else if (res_counter_check_under_limit(&mem_over_limit->res))
-                        continue;
+                if (mem_cgroup_check_under_limit(mem_over_limit))
+                        continue;
 
                 if (!nr_retries--) {
                         if (oom) {
@@ -1334,7 +1342,7 @@
 
         do {
                 progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
-                progress += res_counter_check_under_limit(&mem->res);
+                progress += mem_cgroup_check_under_limit(mem);
         } while (!progress && --retry);
 
         css_put(&mem->css);
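
For illustration, here is a small standalone sketch of the behaviour the new helper encodes. This is not kernel code: struct counter, under_limit(), and the sample values are made up for the example; only the decision logic mirrors mem_cgroup_check_under_limit() from the diff above. It shows the case the old callers got wrong: memory usage is still under the memory limit, but mem+swap usage has already hit the memsw limit.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a res_counter: just a usage and a limit. */
struct counter {
        unsigned long long usage;
        unsigned long long limit;
};

static bool under_limit(const struct counter *c)
{
        return c->usage < c->limit;
}

int main(void)
{
        bool do_swap_account = true;
        /* Memory usage is below the memory limit ... */
        struct counter res   = { .usage =  90 << 20, .limit = 100 << 20 };
        /* ... but mem+swap usage has already reached the memsw limit. */
        struct counter memsw = { .usage = 128 << 20, .limit = 128 << 20 };

        /* Old check: looked at the memory counter only. */
        bool old_check = under_limit(&res);

        /* New combined check: with swap accounting enabled, both counters
         * must be under their limits, as in mem_cgroup_check_under_limit(). */
        bool new_check = do_swap_account
                ? (under_limit(&res) && under_limit(&memsw))
                : under_limit(&res);

        printf("old check says under limit: %d\n", old_check); /* prints 1 */
        printf("new check says under limit: %d\n", new_check); /* prints 0 */
        return 0;
}

In this situation the old check reports "under limit", so the reclaim and charge-retry paths shown in the diff would stop reclaiming too early; with the helper, every caller applies the same combined check that the charge retry loop already did.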