Commit 297880f4af4e492ed5084be9397d65a18ade56ee
Committed by: Linus Torvalds
Parent: 63678c32e2
mm, hugetlb_cgroup: round limit_in_bytes down to hugepage size
The page_counter rounds limits down to page size values. This makes sense, except in the case of hugetlb_cgroup where it's not possible to charge partial hugepages. If the hugetlb_cgroup margin is less than the hugepage size being charged, it will fail as expected.

Round the hugetlb_cgroup limit down to hugepage size, since it is the effective limit of the cgroup.

For consistency, round down PAGE_COUNTER_MAX as well when a hugetlb_cgroup is created: this prevents error reports when a user cannot restore the value to the kernel default.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Nikolay Borisov <kernel@kyup.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
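For a concrete sense of the rounding, here is a minimal userspace sketch (not part of the patch) that mirrors the round_down() arithmetic this commit applies in the write handler; the 4 KB base page size and 2 MB (order 9) hugepage are assumed values for illustration.

/*
 * Minimal sketch, not part of the patch: mirrors the round_down()
 * arithmetic the commit applies, assuming 4 KB base pages and a
 * 2 MB hugepage (order 9).
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HPAGE_ORDER	9			/* assumed: 2 MB hugepages */
#define HPAGE_PAGES	(1UL << HPAGE_ORDER)	/* base pages per hugepage */

/* Same result as the kernel's round_down() for power-of-two alignment. */
#define round_down(x, y)	((x) & ~((y) - 1))

int main(void)
{
	/* A user asks for a 3 MB limit, i.e. 768 base pages. */
	unsigned long nr_pages = (3UL << 20) / PAGE_SIZE;

	/* The write handler now rounds down to a whole hugepage: 512 pages. */
	unsigned long limited = round_down(nr_pages, HPAGE_PAGES);

	printf("requested %lu pages, effective limit %lu pages (%lu bytes)\n",
	       nr_pages, limited, limited * PAGE_SIZE);
	return 0;
}

With these assumptions, a requested 3 MB limit becomes 2 MB, exactly one hugepage, which is the largest amount the cgroup could actually charge anyway.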
Showing 1 changed file with 26 additions and 9 deletions
mm/hugetlb_cgroup.c
@@ -67,26 +67,42 @@
 	return false;
 }
 
+static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
+				struct hugetlb_cgroup *parent_h_cgroup)
+{
+	int idx;
+
+	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
+		struct page_counter *counter = &h_cgroup->hugepage[idx];
+		struct page_counter *parent = NULL;
+		unsigned long limit;
+		int ret;
+
+		if (parent_h_cgroup)
+			parent = &parent_h_cgroup->hugepage[idx];
+		page_counter_init(counter, parent);
+
+		limit = round_down(PAGE_COUNTER_MAX,
+				   1 << huge_page_order(&hstates[idx]));
+		ret = page_counter_limit(counter, limit);
+		VM_BUG_ON(ret);
+	}
+}
+
 static struct cgroup_subsys_state *
 hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
 	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
 	struct hugetlb_cgroup *h_cgroup;
-	int idx;
 
 	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
 	if (!h_cgroup)
 		return ERR_PTR(-ENOMEM);
 
-	if (parent_h_cgroup) {
-		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
-			page_counter_init(&h_cgroup->hugepage[idx],
-					  &parent_h_cgroup->hugepage[idx]);
-	} else {
+	if (!parent_h_cgroup)
 		root_h_cgroup = h_cgroup;
-		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
-			page_counter_init(&h_cgroup->hugepage[idx], NULL);
-	}
+
+	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
 	return &h_cgroup->css;
 }
 
@@ -285,6 +301,7 @@
 		return ret;
 
 	idx = MEMFILE_IDX(of_cft(of)->private);
+	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));
 
 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
 	case RES_LIMIT:
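As a rough illustration of the user-visible effect after this change, the following hypothetical check writes a limit that is not a hugepage multiple and reads back the stored value. The cgroup v1 mount point /sys/fs/cgroup/hugetlb, the child group name "test", and the 2 MB hugepage file name are assumptions for the example, not taken from the commit.

/*
 * Hypothetical check of the user-visible effect; the path, group name
 * and hugepage size below are assumptions for illustration.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/fs/cgroup/hugetlb/test/hugetlb.2MB.limit_in_bytes";
	char buf[64];
	FILE *f;

	/* Request a 3 MB limit, which is not a multiple of the hugepage size. */
	f = fopen(path, "w");
	if (!f || fputs("3145728", f) == EOF)
		return 1;
	fclose(f);

	/* Read the stored limit back. */
	f = fopen(path, "r");
	if (!f || !fgets(buf, sizeof(buf), f))
		return 1;
	fclose(f);

	/* With this patch the value is rounded down to one hugepage. */
	printf("effective limit: %s", buf);
	return 0;
}

With the patch applied, the read-back value should be 2097152 rather than the page-rounded 3145728, matching what the cgroup can actually enforce.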