mm/hugetlb_cgroup.c

  /*
   *
   * Copyright IBM Corporation, 2012
   * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
   *
   * Cgroup v2
   * Copyright (C) 2019 Red Hat, Inc.
   * Author: Giuseppe Scrivano <gscrivan@redhat.com>
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of version 2.1 of the GNU Lesser General Public License
   * as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it would be useful, but
   * WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   *
   */
  
  #include <linux/cgroup.h>
  #include <linux/page_counter.h>
  #include <linux/slab.h>
  #include <linux/hugetlb.h>
  #include <linux/hugetlb_cgroup.h>
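
  /*
   * The cftype->private value of each hugetlb file packs the hstate index
   * into the upper 16 bits and the resource type (RES_*) into the lower 16.
   */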
  #define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
  #define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
  #define MEMFILE_ATTR(val)	((val) & 0xffff)
  #define hugetlb_cgroup_from_counter(counter, idx)                   \
  	container_of(counter, struct hugetlb_cgroup, hugepage[idx])
  static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
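  /*
   * Each hugetlb_cgroup carries two page counters per hstate: one for
   * faulted-in huge pages and one for reservations; @rsvd selects which.
   */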
  static inline struct page_counter *
  __hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
  				     bool rsvd)
  {
  	if (rsvd)
  		return &h_cg->rsvd_hugepage[idx];
  	return &h_cg->hugepage[idx];
  }
  static inline struct page_counter *
  hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
  {
  	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
  }
  
  static inline struct page_counter *
  hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
  {
  	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
  }
  static inline
  struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
  {
  	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
  }
  
  static inline
  struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
  {
  	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
  }
  
  static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
  {
  	return (h_cg == root_h_cgroup);
  }
  static inline struct hugetlb_cgroup *
  parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
  {
  	return hugetlb_cgroup_from_css(h_cg->css.parent);
  }
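  /* Return true if any per-hstate fault counter of @h_cg is still charged. */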
  static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
  {
  	int idx;
  
  	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
  		if (page_counter_read(
  				hugetlb_cgroup_counter_from_cgroup(h_cg, idx)))
  			return true;
  	}
  	return false;
  }
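  /*
   * Initialise the fault and reservation counters for every hstate,
   * parenting each to the matching counter in @parent_h_cgroup (if any)
   * and capping it at PAGE_COUNTER_MAX rounded down to a whole number of
   * huge pages.
   */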
  static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
  				struct hugetlb_cgroup *parent_h_cgroup)
  {
  	int idx;
  
  	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
  		struct page_counter *fault_parent = NULL;
  		struct page_counter *rsvd_parent = NULL;
  		unsigned long limit;
  		int ret;
  		if (parent_h_cgroup) {
  			fault_parent = hugetlb_cgroup_counter_from_cgroup(
  				parent_h_cgroup, idx);
  			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
  				parent_h_cgroup, idx);
  		}
  		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
  								     idx),
  				  fault_parent);
  		page_counter_init(
  			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
  			rsvd_parent);
  
  		limit = round_down(PAGE_COUNTER_MAX,
  				   1 << huge_page_order(&hstates[idx]));
  
  		ret = page_counter_set_max(
  			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
  			limit);
  		VM_BUG_ON(ret);
  		ret = page_counter_set_max(
  			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
  			limit);
  		VM_BUG_ON(ret);
  	}
  }
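  /*
   * Allocate and set up a new hugetlb cgroup; the first css allocated
   * (the one without a parent) becomes root_h_cgroup.
   */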
  static struct cgroup_subsys_state *
  hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
  {
  	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
  	struct hugetlb_cgroup *h_cgroup;
  
  	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
  	if (!h_cgroup)
  		return ERR_PTR(-ENOMEM);
  	if (!parent_h_cgroup)
  		root_h_cgroup = h_cgroup;
  
  	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
  	return &h_cgroup->css;
  }
  static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
  {
  	struct hugetlb_cgroup *h_cgroup;
  	h_cgroup = hugetlb_cgroup_from_css(css);
  	kfree(h_cgroup);
  }
  /*
   * Should be called with hugetlb_lock held.
   * Since we are holding hugetlb_lock, pages cannot get moved from the
   * active list or uncharged from the cgroup, so there is no need to take
   * a page reference or test for page activity here. This function
   * cannot fail.
   */
  static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
  				       struct page *page)
  {
  	unsigned int nr_pages;
  	struct page_counter *counter;
  	struct hugetlb_cgroup *page_hcg;
  	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
  
  	page_hcg = hugetlb_cgroup_from_page(page);
  	/*
	 * We can have pages in the active list without any cgroup,
	 * i.e., hugepages with fewer than 3 pages. We can safely
  	 * ignore those pages.
  	 */
  	if (!page_hcg || page_hcg != h_cg)
  		goto out;
  	nr_pages = compound_nr(page);
  	if (!parent) {
  		parent = root_h_cgroup;
  		/* root has no limit */
  		page_counter_charge(&parent->hugepage[idx], nr_pages);
  	}
  	counter = &h_cg->hugepage[idx];
  	/* Take the pages off the local counter */
  	page_counter_cancel(counter, nr_pages);
  
  	set_hugetlb_cgroup(page, parent);
  out:
  	return;
  }
  
  /*
   * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
   * the parent cgroup.
   */
  static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
  {
  	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
  	struct hstate *h;
  	struct page *page;
  	int idx;
  
  	do {
  		idx = 0;
  		for_each_hstate(h) {
  			spin_lock(&hugetlb_lock);
  			list_for_each_entry(page, &h->hugepage_activelist, lru)
  				hugetlb_cgroup_move_parent(idx, h_cg, page);
  
  			spin_unlock(&hugetlb_lock);
  			idx++;
  		}
  		cond_resched();
  	} while (hugetlb_cgroup_have_usage(h_cg));
  }
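  /*
   * Record a hugetlb memory event (e.g. HUGETLB_MAX) for @hugetlb:
   * events_local is bumped only in this cgroup, while events is also
   * bumped in every ancestor up to, but not including, the root.
   */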
  static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
  				 enum hugetlb_memory_event event)
  {
  	atomic_long_inc(&hugetlb->events_local[idx][event]);
  	cgroup_file_notify(&hugetlb->events_local_file[idx]);
  
  	do {
  		atomic_long_inc(&hugetlb->events[idx][event]);
  		cgroup_file_notify(&hugetlb->events_file[idx]);
  	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
  		 !hugetlb_cgroup_is_root(hugetlb));
  }
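  /*
   * Try to charge @nr_pages of hstate @idx to the current task's hugetlb
   * cgroup (the reservation counter when @rsvd is true).  Returns 0 and
   * sets *ptr to the charged cgroup, or -ENOMEM after emitting a
   * HUGETLB_MAX event when the charge would exceed the limit.
   */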
  static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
  					  struct hugetlb_cgroup **ptr,
  					  bool rsvd)
  {
  	int ret = 0;
  	struct page_counter *counter;
  	struct hugetlb_cgroup *h_cg = NULL;
  
  	if (hugetlb_cgroup_disabled())
  		goto done;
  	/*
	 * We don't charge any cgroup if the compound page has fewer
  	 * than 3 pages.
  	 */
  	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
  		goto done;
  again:
  	rcu_read_lock();
  	h_cg = hugetlb_cgroup_from_task(current);
  	if (!css_tryget(&h_cg->css)) {
  		rcu_read_unlock();
  		goto again;
  	}
  	rcu_read_unlock();
  	if (!page_counter_try_charge(
  		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
  		    nr_pages, &counter)) {
  		ret = -ENOMEM;
  		hugetlb_event(h_cg, idx, HUGETLB_MAX);
  		css_put(&h_cg->css);
  		goto done;
  	}
  	/* Reservations take a reference to the css because they do not get
  	 * reparented.
  	 */
  	if (!rsvd)
  		css_put(&h_cg->css);
  done:
  	*ptr = h_cg;
  	return ret;
  }
  int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
  				 struct hugetlb_cgroup **ptr)
  {
  	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
  }
  
  int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
  				      struct hugetlb_cgroup **ptr)
  {
  	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
  }
  /* Should be called with hugetlb_lock held */
  static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
  					   struct hugetlb_cgroup *h_cg,
  					   struct page *page, bool rsvd)
  {
  	if (hugetlb_cgroup_disabled() || !h_cg)
  		return;
  	__set_hugetlb_cgroup(page, h_cg, rsvd);
  	return;
  }
  void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
  				  struct hugetlb_cgroup *h_cg,
  				  struct page *page)
  {
  	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
  }
  
  void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
  				       struct hugetlb_cgroup *h_cg,
  				       struct page *page)
  {
  	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
  }
  /*
   * Should be called with hugetlb_lock held
   */
  static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
  					   struct page *page, bool rsvd)
  {
  	struct hugetlb_cgroup *h_cg;
  
  	if (hugetlb_cgroup_disabled())
  		return;
  	lockdep_assert_held(&hugetlb_lock);
  	h_cg = __hugetlb_cgroup_from_page(page, rsvd);
  	if (unlikely(!h_cg))
  		return;
  	__set_hugetlb_cgroup(page, NULL, rsvd);
  
  	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
  								   rsvd),
  			      nr_pages);
  
  	if (rsvd)
  		css_put(&h_cg->css);
  	return;
  }
  void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
  				  struct page *page)
  {
  	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
  }
  
  void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
  				       struct page *page)
  {
  	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
  }
  
  static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
  					     struct hugetlb_cgroup *h_cg,
  					     bool rsvd)
  {
  	if (hugetlb_cgroup_disabled() || !h_cg)
  		return;
  
  	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
  		return;
  	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
  								   rsvd),
  			      nr_pages);
  
  	if (rsvd)
  		css_put(&h_cg->css);
  }
  
  void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
  				    struct hugetlb_cgroup *h_cg)
  {
  	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
  }
  
  void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
  					 struct hugetlb_cgroup *h_cg)
  {
  	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
  }
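  /*
   * Uncharge the reservations in the range [start, end) of @resv and drop
   * the css reference taken when the reservation counter was charged.
   */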
  void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
  				     unsigned long end)
  {
  	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
  	    !resv->css)
  		return;
  	page_counter_uncharge(resv->reservation_counter,
  			      (end - start) * resv->pages_per_hpage);
  	css_put(resv->css);
  }
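  /*
   * Uncharge the reservation recorded on @rg when the file_region, rather
   * than the resv_map itself, carries the reservation counter.
   */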
  void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
  					 struct file_region *rg,
  					 unsigned long nr_pages)
  {
  	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
  		return;
  
  	if (rg->reservation_counter && resv->pages_per_hpage && nr_pages > 0 &&
  	    !resv->reservation_counter) {
  		page_counter_uncharge(rg->reservation_counter,
  				      nr_pages * resv->pages_per_hpage);
  		css_put(rg->css);
  	}
  }
  enum {
  	RES_USAGE,
  	RES_RSVD_USAGE,
  	RES_LIMIT,
  	RES_RSVD_LIMIT,
  	RES_MAX_USAGE,
  	RES_RSVD_MAX_USAGE,
  	RES_FAILCNT,
  	RES_RSVD_FAILCNT,
  };
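  /*
   * .read_u64 handler for the legacy (cgroup v1) files: reports usage,
   * limit and high-watermark in bytes, or the raw failure count, for the
   * fault or reservation counter selected by cft->private.
   */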
  static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
  				   struct cftype *cft)
  {
  	struct page_counter *counter;
  	struct page_counter *rsvd_counter;
  	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
  	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
  	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];
  	switch (MEMFILE_ATTR(cft->private)) {
  	case RES_USAGE:
  		return (u64)page_counter_read(counter) * PAGE_SIZE;
  	case RES_RSVD_USAGE:
  		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
  	case RES_LIMIT:
  		return (u64)counter->max * PAGE_SIZE;
  	case RES_RSVD_LIMIT:
  		return (u64)rsvd_counter->max * PAGE_SIZE;
  	case RES_MAX_USAGE:
  		return (u64)counter->watermark * PAGE_SIZE;
  	case RES_RSVD_MAX_USAGE:
  		return (u64)rsvd_counter->watermark * PAGE_SIZE;
  	case RES_FAILCNT:
  		return counter->failcnt;
  	case RES_RSVD_FAILCNT:
  		return rsvd_counter->failcnt;
  	default:
  		BUG();
  	}
  }
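  /*
   * seq_show handler for the cgroup v2 files: prints the usage in bytes,
   * or the limit in bytes ("max" when the counter is unlimited).
   */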
  static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
  {
  	int idx;
  	u64 val;
  	struct cftype *cft = seq_cft(seq);
  	unsigned long limit;
  	struct page_counter *counter;
  	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
  
  	idx = MEMFILE_IDX(cft->private);
  	counter = &h_cg->hugepage[idx];
  
  	limit = round_down(PAGE_COUNTER_MAX,
  			   1 << huge_page_order(&hstates[idx]));
  
  	switch (MEMFILE_ATTR(cft->private)) {
  	case RES_RSVD_USAGE:
  		counter = &h_cg->rsvd_hugepage[idx];
  		fallthrough;
  	case RES_USAGE:
  		val = (u64)page_counter_read(counter);
  		seq_printf(seq, "%llu
  ", val * PAGE_SIZE);
  		break;
  	case RES_RSVD_LIMIT:
  		counter = &h_cg->rsvd_hugepage[idx];
  		fallthrough;
  	case RES_LIMIT:
  		val = (u64)counter->max;
  		if (val == limit)
  			seq_puts(seq, "max
  ");
  		else
  			seq_printf(seq, "%llu
  ", val * PAGE_SIZE);
  		break;
  	default:
  		BUG();
  	}
  
  	return 0;
  }
  static DEFINE_MUTEX(hugetlb_limit_mutex);
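  /*
   * Common write handler for the limit files; @max is the keyword that
   * means "no limit" ("-1" on cgroup v1, "max" on v2).  For example, with
   * 2MB huge pages, "echo 1G > hugetlb.2MB.max" ends up here.  The value
   * is rounded down to a whole number of huge pages and applied to the
   * fault or reservation limit under hugetlb_limit_mutex.
   */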
  static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
  				    char *buf, size_t nbytes, loff_t off,
  				    const char *max)
  {
  	int ret, idx;
  	unsigned long nr_pages;
  	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
  	bool rsvd = false;
  	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
  		return -EINVAL;
  	buf = strstrip(buf);
  	ret = page_counter_memparse(buf, max, &nr_pages);
  	if (ret)
  		return ret;
  	idx = MEMFILE_IDX(of_cft(of)->private);
  	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));
  	switch (MEMFILE_ATTR(of_cft(of)->private)) {
  	case RES_RSVD_LIMIT:
  		rsvd = true;
  		fallthrough;
  	case RES_LIMIT:
  		mutex_lock(&hugetlb_limit_mutex);
  		ret = page_counter_set_max(
  			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
  			nr_pages);
  		mutex_unlock(&hugetlb_limit_mutex);
  		break;
  	default:
  		ret = -EINVAL;
  		break;
  	}
  	return ret ?: nbytes;
  }
  static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
  					   char *buf, size_t nbytes, loff_t off)
  {
  	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
  }
  
  static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
  					char *buf, size_t nbytes, loff_t off)
  {
  	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
  }
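  /*
   * Write handler for the legacy max_usage_in_bytes and failcnt files:
   * writing anything resets the high-usage watermark or the failure count.
   */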
  static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
  				    char *buf, size_t nbytes, loff_t off)
  {
  	int ret = 0;
  	struct page_counter *counter, *rsvd_counter;
  	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
  	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
  	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];
  	switch (MEMFILE_ATTR(of_cft(of)->private)) {
  	case RES_MAX_USAGE:
  		page_counter_reset_watermark(counter);
  		break;
  	case RES_RSVD_MAX_USAGE:
  		page_counter_reset_watermark(rsvd_counter);
  		break;
  	case RES_FAILCNT:
  		counter->failcnt = 0;
  		break;
  	case RES_RSVD_FAILCNT:
  		rsvd_counter->failcnt = 0;
  		break;
  	default:
  		ret = -EINVAL;
  		break;
  	}
  	return ret ?: nbytes;
  }
  
  static char *mem_fmt(char *buf, int size, unsigned long hsize)
  {
  	if (hsize >= (1UL << 30))
  		snprintf(buf, size, "%luGB", hsize >> 30);
  	else if (hsize >= (1UL << 20))
  		snprintf(buf, size, "%luMB", hsize >> 20);
  	else
  		snprintf(buf, size, "%luKB", hsize >> 10);
  	return buf;
  }
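  /*
   * seq_show handler for the v2 "events" and "events.local" files: prints
   * "max <n>", the number of charges that failed against the limit.
   */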
  static int __hugetlb_events_show(struct seq_file *seq, bool local)
  {
  	int idx;
  	long max;
  	struct cftype *cft = seq_cft(seq);
  	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
  
  	idx = MEMFILE_IDX(cft->private);
  
  	if (local)
  		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
  	else
  		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);
  
  	seq_printf(seq, "max %lu
  ", max);
  
  	return 0;
  }
  
  static int hugetlb_events_show(struct seq_file *seq, void *v)
  {
  	return __hugetlb_events_show(seq, false);
  }
  
  static int hugetlb_events_local_show(struct seq_file *seq, void *v)
  {
  	return __hugetlb_events_show(seq, true);
  }
  
  static void __init __hugetlb_cgroup_file_dfl_init(int idx)
  {
  	char buf[32];
  	struct cftype *cft;
  	struct hstate *h = &hstates[idx];
  
  	/* format the size */
  	mem_fmt(buf, sizeof(buf), huge_page_size(h));
  
  	/* Add the limit file */
  	cft = &h->cgroup_files_dfl[0];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
  	cft->seq_show = hugetlb_cgroup_read_u64_max;
  	cft->write = hugetlb_cgroup_write_dfl;
  	cft->flags = CFTYPE_NOT_ON_ROOT;
  	/* Add the reservation limit file */
  	cft = &h->cgroup_files_dfl[1];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
  	cft->seq_show = hugetlb_cgroup_read_u64_max;
  	cft->write = hugetlb_cgroup_write_dfl;
  	cft->flags = CFTYPE_NOT_ON_ROOT;
  
  	/* Add the current usage file */
  	cft = &h->cgroup_files_dfl[2];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
  	cft->seq_show = hugetlb_cgroup_read_u64_max;
  	cft->flags = CFTYPE_NOT_ON_ROOT;
  	/* Add the current reservation usage file */
  	cft = &h->cgroup_files_dfl[3];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
  	cft->seq_show = hugetlb_cgroup_read_u64_max;
  	cft->flags = CFTYPE_NOT_ON_ROOT;
  	/* Add the events file */
  	cft = &h->cgroup_files_dfl[4];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
  	cft->private = MEMFILE_PRIVATE(idx, 0);
  	cft->seq_show = hugetlb_events_show;
  	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
  	cft->flags = CFTYPE_NOT_ON_ROOT;
  
  	/* Add the events.local file */
  	cft = &h->cgroup_files_dfl[5];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
  	cft->private = MEMFILE_PRIVATE(idx, 0);
  	cft->seq_show = hugetlb_events_local_show;
  	cft->file_offset = offsetof(struct hugetlb_cgroup,
  				    events_local_file[idx]);
  	cft->flags = CFTYPE_NOT_ON_ROOT;
  
  	/* NULL terminate the last cft */
  	cft = &h->cgroup_files_dfl[6];
  	memset(cft, 0, sizeof(*cft));
  
  	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
  				       h->cgroup_files_dfl));
  }
  
  static void __init __hugetlb_cgroup_file_legacy_init(int idx)
  {
  	char buf[32];
  	struct cftype *cft;
  	struct hstate *h = &hstates[idx];
  
  	/* format the size */
  	mem_fmt(buf, sizeof(buf), huge_page_size(h));
  
  	/* Add the limit file */
  	cft = &h->cgroup_files_legacy[0];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
  	cft->read_u64 = hugetlb_cgroup_read_u64;
  	cft->write = hugetlb_cgroup_write_legacy;
  	/* Add the reservation limit file */
  	cft = &h->cgroup_files_legacy[1];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
  	cft->read_u64 = hugetlb_cgroup_read_u64;
  	cft->write = hugetlb_cgroup_write_legacy;
  
  	/* Add the usage file */
  	cft = &h->cgroup_files_legacy[2];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
  	cft->read_u64 = hugetlb_cgroup_read_u64;
  	/* Add the reservation usage file */
  	cft = &h->cgroup_files_legacy[3];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
  	cft->read_u64 = hugetlb_cgroup_read_u64;
  	/* Add the MAX usage file */
  	cft = &h->cgroup_files_legacy[4];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
  	cft->write = hugetlb_cgroup_reset;
  	cft->read_u64 = hugetlb_cgroup_read_u64;
  	/* Add the MAX reservation usage file */
  	cft = &h->cgroup_files_legacy[5];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
  	cft->write = hugetlb_cgroup_reset;
  	cft->read_u64 = hugetlb_cgroup_read_u64;
	/* Add the failcnt file */
  	cft = &h->cgroup_files_legacy[6];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
  	cft->write = hugetlb_cgroup_reset;
  	cft->read_u64 = hugetlb_cgroup_read_u64;
  
	/* Add the reservation failcnt file */
  	cft = &h->cgroup_files_legacy[7];
  	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
  	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
  	cft->write = hugetlb_cgroup_reset;
  	cft->read_u64 = hugetlb_cgroup_read_u64;
  
  	/* NULL terminate the last cft */
  	cft = &h->cgroup_files_legacy[8];
  	memset(cft, 0, sizeof(*cft));
  	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
  					  h->cgroup_files_legacy));
  }
  
  static void __init __hugetlb_cgroup_file_init(int idx)
  {
  	__hugetlb_cgroup_file_dfl_init(idx);
  	__hugetlb_cgroup_file_legacy_init(idx);
  }
  
  void __init hugetlb_cgroup_file_init(void)
  {
  	struct hstate *h;
  
  	for_each_hstate(h) {
  		/*
  		 * Add cgroup control files only if the huge page consists
  		 * of more than two normal pages. This is because we use
  		 * page[2].private for storing cgroup details.
  		 */
  		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
  			__hugetlb_cgroup_file_init(hstate_index(h));
  	}
  }
  /*
   * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
   * when we migrate hugepages
   */
  void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
  {
  	struct hugetlb_cgroup *h_cg;
  	struct hugetlb_cgroup *h_cg_rsvd;
  	struct hstate *h = page_hstate(oldhpage);
  
  	if (hugetlb_cgroup_disabled())
  		return;
  	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
  	spin_lock(&hugetlb_lock);
  	h_cg = hugetlb_cgroup_from_page(oldhpage);
  	h_cg_rsvd = hugetlb_cgroup_from_page_rsvd(oldhpage);
  	set_hugetlb_cgroup(oldhpage, NULL);
  	set_hugetlb_cgroup_rsvd(oldhpage, NULL);
  
  	/* move the h_cg details to new cgroup */
  	set_hugetlb_cgroup(newhpage, h_cg);
  	set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
  	list_move(&newhpage->lru, &h->hugepage_activelist);
  	spin_unlock(&hugetlb_lock);
  	return;
  }
  static struct cftype hugetlb_files[] = {
  	{} /* terminate */
  };
  struct cgroup_subsys hugetlb_cgrp_subsys = {
  	.css_alloc	= hugetlb_cgroup_css_alloc,
  	.css_offline	= hugetlb_cgroup_css_offline,
  	.css_free	= hugetlb_cgroup_css_free,
  	.dfl_cftypes	= hugetlb_files,
  	.legacy_cftypes	= hugetlb_files,
  };