Blame view

include/linux/memcontrol.h 11 KB
8cdea7c05   Balbir Singh   Memory controller...
1
2
3
4
5
  /* memcontrol.h - Memory Controller
   *
   * Copyright IBM Corporation, 2007
   * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   *
78fb74669   Pavel Emelianov   Memory controller...
6
7
8
   * Copyright 2007 OpenVZ SWsoft Inc
   * Author: Pavel Emelianov <xemul@openvz.org>
   *
8cdea7c05   Balbir Singh   Memory controller...
9
10
11
12
13
14
15
16
17
18
19
20
21
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   */
  
  #ifndef _LINUX_MEMCONTROL_H
  #define _LINUX_MEMCONTROL_H
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
22
  #include <linux/cgroup.h>
456f998ec   Ying Han   memcg: add the pa...
23
  #include <linux/vm_event_item.h>
78fb74669   Pavel Emelianov   Memory controller...
24
25
  struct mem_cgroup;
  struct page_cgroup;
8697d3319   Balbir Singh   Memory controller...
26
27
  struct page;
  struct mm_struct;
78fb74669   Pavel Emelianov   Memory controller...
28

2a7106f2c   Greg Thelen   memcg: create ext...
29
30
31
32
  /* Stats that can be updated by kernel. */
  enum mem_cgroup_page_stat_item {
  	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
  };
5660048cc   Johannes Weiner   mm: move memcg hi...
33
34
35
36
37
  struct mem_cgroup_reclaim_cookie {
  	struct zone *zone;
  	int priority;
  	unsigned int generation;
  };
00f0b8259   Balbir Singh   Memory controller...
38
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2c26fdd70   KAMEZAWA Hiroyuki   memcg: revert gfp...
39
40
41
42
43
44
45
46
47
48
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones. So, the
 * "where I want memory from" bits of gfp_mask have no meaning, and any bits
 * of that field are available — but adding a rule is better. A charge
 * function's gfp_mask should be set to GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK) to avoid ambiguous code.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
 */
78fb74669   Pavel Emelianov   Memory controller...
49

7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
50
  extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
e1a1cd590   Balbir Singh   Memory controller...
51
  				gfp_t gfp_mask);
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
52
  /* for swap handling */
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
53
  extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
72835c86c   Johannes Weiner   mm: unify remaini...
54
  		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
55
  extern void mem_cgroup_commit_charge_swapin(struct page *page,
72835c86c   Johannes Weiner   mm: unify remaini...
56
57
  					struct mem_cgroup *memcg);
  extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
58

8289546e5   Hugh Dickins   memcg: remove mem...
59
60
  extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
  					gfp_t gfp_mask);
925b7673c   Johannes Weiner   mm: make per-memc...
61
62
63
64
65
66
67
68
  
  struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
  struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
  				       enum lru_list);
  void mem_cgroup_lru_del_list(struct page *, enum lru_list);
  void mem_cgroup_lru_del(struct page *);
  struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
  					 enum lru_list, enum lru_list);
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
69
70
71
72
  
/* For coalescing uncharges, to reduce memcg overhead */
  extern void mem_cgroup_uncharge_start(void);
  extern void mem_cgroup_uncharge_end(void);
3c541e14b   Balbir Singh   Memory controller...
73
  extern void mem_cgroup_uncharge_page(struct page *page);
69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
74
  extern void mem_cgroup_uncharge_cache_page(struct page *page);
c9b0ed514   KAMEZAWA Hiroyuki   memcg: helper fun...
75

c0ff4b854   Raghavendra K T   memcg: rename mem...
76
77
  extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask);
  int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
3062fc67d   David Rientjes   memcontrol: move ...
78

e42d9d5d4   Wu Fengguang   memcg: rename and...
79
  extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
cf475ad28   Balbir Singh   cgroups: add an o...
80
  extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
a433658c3   KOSAKI Motohiro   vmscan,memcg: mem...
81
  extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
cf475ad28   Balbir Singh   cgroups: add an o...
82

e1aab161e   Glauber Costa   socket: initial c...
83
  extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
d1a4c0b37   Glauber Costa   tcp memory pressu...
84
  extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
e1aab161e   Glauber Costa   socket: initial c...
85

2e4d40915   Lai Jiangshan   memcontrol: rcu_r...
86
87
88
  static inline
  int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
  {
c0ff4b854   Raghavendra K T   memcg: rename mem...
89
  	struct mem_cgroup *memcg;
2e4d40915   Lai Jiangshan   memcontrol: rcu_r...
90
  	rcu_read_lock();
c0ff4b854   Raghavendra K T   memcg: rename mem...
91
  	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
2e4d40915   Lai Jiangshan   memcontrol: rcu_r...
92
  	rcu_read_unlock();
c0ff4b854   Raghavendra K T   memcg: rename mem...
93
  	return cgroup == memcg;
2e4d40915   Lai Jiangshan   memcontrol: rcu_r...
94
  }
8a9f3ccd2   Balbir Singh   Memory controller...
95

c0ff4b854   Raghavendra K T   memcg: rename mem...
96
  extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
d324236b3   Wu Fengguang   memcg: add access...
97

e8589cc18   KAMEZAWA Hiroyuki   memcg: better mig...
98
  extern int
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
99
  mem_cgroup_prepare_migration(struct page *page,
72835c86c   Johannes Weiner   mm: unify remaini...
100
  	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
c0ff4b854   Raghavendra K T   memcg: rename mem...
101
  extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
50de1dd96   Daisuke Nishimura   memcg: fix memory...
102
  	struct page *oldpage, struct page *newpage, bool migration_ok);
ae41be374   KAMEZAWA Hiroyuki   bugfix for memory...
103

5660048cc   Johannes Weiner   mm: move memcg hi...
104
105
106
107
  struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
  				   struct mem_cgroup *,
  				   struct mem_cgroup_reclaim_cookie *);
  void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
58ae83db2   KAMEZAWA Hiroyuki   per-zone and recl...
108
109
110
  /*
   * For memory reclaim.
   */
9b272977e   Johannes Weiner   memcg: skip scann...
111
112
113
114
  int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
  				    struct zone *zone);
  int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
  				    struct zone *zone);
889976dbc   Ying Han   memcg: reclaim me...
115
  int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
1bac180bd   Ying Han   memcg: rename mem...
116
  unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
117
  					int nid, int zid, unsigned int lrumask);
3e2f41f1f   KOSAKI Motohiro   memcg: add zone_r...
118
119
120
121
  struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
  						      struct zone *zone);
  struct zone_reclaim_stat*
  mem_cgroup_get_reclaim_stat_from_page(struct page *page);
e222432bf   Balbir Singh   memcg: show memcg...
122
123
  extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
  					struct task_struct *p);
ab936cbcd   KAMEZAWA Hiroyuki   memcg: add mem_cg...
124
125
  extern void mem_cgroup_replace_page_cache(struct page *oldpage,
  					struct page *newpage);
58ae83db2   KAMEZAWA Hiroyuki   per-zone and recl...
126

4e5f01c2b   KAMEZAWA Hiroyuki   memcg: clear pc->...
127
  extern void mem_cgroup_reset_owner(struct page *page);
c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
128
129
130
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  extern int do_swap_account;
  #endif
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
131
132
133
134
135
136
137
  
  static inline bool mem_cgroup_disabled(void)
  {
  	if (mem_cgroup_subsys.disabled)
  		return true;
  	return false;
  }
2a7106f2c   Greg Thelen   memcg: create ext...
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
  void mem_cgroup_update_page_stat(struct page *page,
  				 enum mem_cgroup_page_stat_item idx,
  				 int val);
  
  static inline void mem_cgroup_inc_page_stat(struct page *page,
  					    enum mem_cgroup_page_stat_item idx)
  {
  	mem_cgroup_update_page_stat(page, idx, 1);
  }
  
  static inline void mem_cgroup_dec_page_stat(struct page *page,
  					    enum mem_cgroup_page_stat_item idx)
  {
  	mem_cgroup_update_page_stat(page, idx, -1);
  }
4e4169535   Balbir Singh   memory controller...
153
  unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
0ae5e89c6   Ying Han   memcg: count the ...
154
155
  						gfp_t gfp_mask,
  						unsigned long *total_scanned);
c0ff4b854   Raghavendra K T   memcg: rename mem...
156
  u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);
a63d83f42   David Rientjes   oom: badness heur...
157

456f998ec   Ying Han   memcg: add the pa...
158
  void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
159
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
e94c8a9cb   KAMEZAWA Hiroyuki   memcg: make mem_c...
160
  void mem_cgroup_split_huge_fixup(struct page *head);
ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
161
  #endif
f212ad7cf   Daisuke Nishimura   memcg: add memcg ...
162
163
164
165
  #ifdef CONFIG_DEBUG_VM
  bool mem_cgroup_bad_page_check(struct page *page);
  void mem_cgroup_print_bad_page(struct page *page);
  #endif
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
166
  #else /* CONFIG_CGROUP_MEM_RES_CTLR */
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
167
168
169
  struct mem_cgroup;
  
  static inline int mem_cgroup_newpage_charge(struct page *page,
8289546e5   Hugh Dickins   memcg: remove mem...
170
  					struct mm_struct *mm, gfp_t gfp_mask)
8a9f3ccd2   Balbir Singh   Memory controller...
171
172
173
  {
  	return 0;
  }
8289546e5   Hugh Dickins   memcg: remove mem...
174
175
  static inline int mem_cgroup_cache_charge(struct page *page,
  					struct mm_struct *mm, gfp_t gfp_mask)
8a9f3ccd2   Balbir Singh   Memory controller...
176
  {
8289546e5   Hugh Dickins   memcg: remove mem...
177
  	return 0;
8a9f3ccd2   Balbir Singh   Memory controller...
178
  }
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
179
  static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
72835c86c   Johannes Weiner   mm: unify remaini...
180
  		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
181
182
183
184
185
  {
  	return 0;
  }
  
  static inline void mem_cgroup_commit_charge_swapin(struct page *page,
72835c86c   Johannes Weiner   mm: unify remaini...
186
  					  struct mem_cgroup *memcg)
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
187
188
  {
  }
72835c86c   Johannes Weiner   mm: unify remaini...
189
  static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
190
191
  {
  }
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
192
193
194
195
196
197
198
  static inline void mem_cgroup_uncharge_start(void)
  {
  }
  
  static inline void mem_cgroup_uncharge_end(void)
  {
  }
8a9f3ccd2   Balbir Singh   Memory controller...
199
200
201
  static inline void mem_cgroup_uncharge_page(struct page *page)
  {
  }
69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
202
203
204
  static inline void mem_cgroup_uncharge_cache_page(struct page *page)
  {
  }
925b7673c   Johannes Weiner   mm: make per-memc...
205
206
  static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
  						    struct mem_cgroup *memcg)
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
207
  {
925b7673c   Johannes Weiner   mm: make per-memc...
208
  	return &zone->lruvec;
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
209
  }
925b7673c   Johannes Weiner   mm: make per-memc...
210
211
212
  static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
  						     struct page *page,
  						     enum lru_list lru)
3f58a8294   Minchan Kim   memcg: move memcg...
213
  {
925b7673c   Johannes Weiner   mm: make per-memc...
214
  	return &zone->lruvec;
3f58a8294   Minchan Kim   memcg: move memcg...
215
  }
925b7673c   Johannes Weiner   mm: make per-memc...
216
  static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
217
  {
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
218
  }
925b7673c   Johannes Weiner   mm: make per-memc...
219
  static inline void mem_cgroup_lru_del(struct page *page)
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
220
  {
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
221
  }
925b7673c   Johannes Weiner   mm: make per-memc...
222
223
224
225
  static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
  						       struct page *page,
  						       enum lru_list from,
  						       enum lru_list to)
66e1707bc   Balbir Singh   Memory controller...
226
  {
925b7673c   Johannes Weiner   mm: make per-memc...
227
  	return &zone->lruvec;
66e1707bc   Balbir Singh   Memory controller...
228
  }
e42d9d5d4   Wu Fengguang   memcg: rename and...
229
230
231
232
  static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
  {
  	return NULL;
  }
a433658c3   KOSAKI Motohiro   vmscan,memcg: mem...
233
234
235
236
  static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
  {
  	return NULL;
  }
c0ff4b854   Raghavendra K T   memcg: rename mem...
237
238
  static inline int mm_match_cgroup(struct mm_struct *mm,
  		struct mem_cgroup *memcg)
bed7161a5   Balbir Singh   Memory controller...
239
  {
60c12b120   David Rientjes   memcontrol: add v...
240
  	return 1;
bed7161a5   Balbir Singh   Memory controller...
241
  }
4c4a22148   David Rientjes   memcontrol: move ...
242
  static inline int task_in_mem_cgroup(struct task_struct *task,
c0ff4b854   Raghavendra K T   memcg: rename mem...
243
  				     const struct mem_cgroup *memcg)
4c4a22148   David Rientjes   memcontrol: move ...
244
245
246
  {
  	return 1;
  }
c0ff4b854   Raghavendra K T   memcg: rename mem...
247
248
  static inline struct cgroup_subsys_state
  		*mem_cgroup_css(struct mem_cgroup *memcg)
d324236b3   Wu Fengguang   memcg: add access...
249
250
251
  {
  	return NULL;
  }
e8589cc18   KAMEZAWA Hiroyuki   memcg: better mig...
252
  static inline int
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
253
  mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
72835c86c   Johannes Weiner   mm: unify remaini...
254
  	struct mem_cgroup **memcgp, gfp_t gfp_mask)
ae41be374   KAMEZAWA Hiroyuki   bugfix for memory...
255
256
257
  {
  	return 0;
  }
c0ff4b854   Raghavendra K T   memcg: rename mem...
258
  static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
50de1dd96   Daisuke Nishimura   memcg: fix memory...
259
  		struct page *oldpage, struct page *newpage, bool migration_ok)
ae41be374   KAMEZAWA Hiroyuki   bugfix for memory...
260
261
  {
  }
5660048cc   Johannes Weiner   mm: move memcg hi...
262
263
264
265
266
267
268
269
270
271
272
273
/*
 * Hierarchy-iterator stubs for !CONFIG_CGROUP_MEM_RES_CTLR: there is no
 * memcg hierarchy to walk, so iteration yields nothing and breaking out
 * has nothing to clean up.
 */
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}
c0ff4b854   Raghavendra K T   memcg: rename mem...
274
  static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *memcg)
6c48a1d04   KAMEZAWA Hiroyuki   per-zone and recl...
275
276
277
  {
  	return 0;
  }
c0ff4b854   Raghavendra K T   memcg: rename mem...
278
  static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *memcg,
6c48a1d04   KAMEZAWA Hiroyuki   per-zone and recl...
279
280
281
  						int priority)
  {
  }
c0ff4b854   Raghavendra K T   memcg: rename mem...
282
  static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *memcg,
6c48a1d04   KAMEZAWA Hiroyuki   per-zone and recl...
283
284
285
  						int priority)
  {
  }
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
286
287
288
289
/* Controller compiled out: behave as if permanently disabled. */
static inline bool mem_cgroup_disabled(void)
{
	return true;
}
a636b327f   KAMEZAWA Hiroyuki   memcg: avoid unne...
290

14797e236   KOSAKI Motohiro   memcg: add inacti...
291
  static inline int
9b272977e   Johannes Weiner   memcg: skip scann...
292
  mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
14797e236   KOSAKI Motohiro   memcg: add inacti...
293
294
295
  {
  	return 1;
  }
56e49d218   Rik van Riel   vmscan: evict use...
296
  static inline int
9b272977e   Johannes Weiner   memcg: skip scann...
297
  mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
56e49d218   Rik van Riel   vmscan: evict use...
298
299
300
  {
  	return 1;
  }
a3d8e0549   KOSAKI Motohiro   memcg: add mem_cg...
301
  static inline unsigned long
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
302
303
  mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
  				unsigned int lru_mask)
a3d8e0549   KOSAKI Motohiro   memcg: add mem_cg...
304
305
306
  {
  	return 0;
  }
3e2f41f1f   KOSAKI Motohiro   memcg: add zone_r...
307
308
309
310
311
312
313
314
315
316
317
/* No per-memcg reclaim statistics exist without the memory controller. */
static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}
e222432bf   Balbir Singh   memcg: show memcg...
318
319
320
321
  static inline void
  mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
  {
  }
2a7106f2c   Greg Thelen   memcg: create ext...
322
323
324
325
326
327
328
  static inline void mem_cgroup_inc_page_stat(struct page *page,
  					    enum mem_cgroup_page_stat_item idx)
  {
  }
  
  static inline void mem_cgroup_dec_page_stat(struct page *page,
  					    enum mem_cgroup_page_stat_item idx)
d69b042f3   Balbir Singh   memcg: add file-b...
329
330
  {
  }
4e4169535   Balbir Singh   memory controller...
331
332
  static inline
  unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
0ae5e89c6   Ying Han   memcg: count the ...
333
334
  					    gfp_t gfp_mask,
  					    unsigned long *total_scanned)
4e4169535   Balbir Singh   memory controller...
335
336
337
  {
  	return 0;
  }
a63d83f42   David Rientjes   oom: badness heur...
338
  static inline
c0ff4b854   Raghavendra K T   memcg: rename mem...
339
  u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
a63d83f42   David Rientjes   oom: badness heur...
340
341
342
  {
  	return 0;
  }
e94c8a9cb   KAMEZAWA Hiroyuki   memcg: make mem_c...
343
  static inline void mem_cgroup_split_huge_fixup(struct page *head)
ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
344
345
  {
  }
456f998ec   Ying Han   memcg: add the pa...
346
347
348
349
  static inline
  void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
  {
  }
ab936cbcd   KAMEZAWA Hiroyuki   memcg: add mem_cg...
350
351
352
353
  static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
  				struct page *newpage)
  {
  }
4e5f01c2b   KAMEZAWA Hiroyuki   memcg: clear pc->...
354
355
356
357
  
  static inline void mem_cgroup_reset_owner(struct page *page)
  {
  }
78fb74669   Pavel Emelianov   Memory controller...
358
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
f212ad7cf   Daisuke Nishimura   memcg: add memcg ...
359
360
361
362
363
364
365
366
367
368
369
370
/*
 * Debug-check fallbacks, used when the memcg page sanity checks are
 * compiled out (either no memory controller, or CONFIG_DEBUG_VM is off):
 * no page is ever reported bad and there is nothing to print.
 */
#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif
e1aab161e   Glauber Costa   socket: initial c...
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
  enum {
  	UNDER_LIMIT,
  	SOFT_LIMIT,
  	OVER_LIMIT,
  };
  
  struct sock;
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
  void sock_update_memcg(struct sock *sk);
  void sock_release_memcg(struct sock *sk);
  #else
  static inline void sock_update_memcg(struct sock *sk)
  {
  }
  static inline void sock_release_memcg(struct sock *sk)
  {
  }
  #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
8cdea7c05   Balbir Singh   Memory controller...
389
  #endif /* _LINUX_MEMCONTROL_H */