include/linux/memcontrol.h

  /* memcontrol.h - Memory Controller
   *
   * Copyright IBM Corporation, 2007
   * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   *
   * Copyright 2007 OpenVZ SWsoft Inc
   * Author: Pavel Emelianov <xemul@openvz.org>
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   */
  
  #ifndef _LINUX_MEMCONTROL_H
  #define _LINUX_MEMCONTROL_H
  #include <linux/cgroup.h>
  #include <linux/vm_event_item.h>
  #include <linux/hardirq.h>
  #include <linux/jump_label.h>

  struct mem_cgroup;
  struct page;
  struct mm_struct;
  struct kmem_cache;

  /*
   * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c;
   * these two lists should be kept in accord with each other.
   */
  enum mem_cgroup_stat_index {
  	/*
  	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
  	 */
  	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
  	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
  	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
  	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
  	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
  	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
  	MEM_CGROUP_STAT_NSTATS,
  };
  struct mem_cgroup_reclaim_cookie {
  	struct zone *zone;
  	int priority;
  	unsigned int generation;
  };
  enum mem_cgroup_events_index {
  	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
  	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
  	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
  	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
  	MEM_CGROUP_EVENTS_NSTATS,
  	/* default hierarchy events */
  	MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
  	MEMCG_HIGH,
  	MEMCG_MAX,
  	MEMCG_OOM,
  	MEMCG_NR_EVENTS,
  };
  #ifdef CONFIG_MEMCG
  void mem_cgroup_events(struct mem_cgroup *memcg,
  		       enum mem_cgroup_events_index idx,
  		       unsigned int nr);
  
  bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
  int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
  			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
  void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
  			      bool lrucare);
  void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
  void mem_cgroup_uncharge(struct page *page);
  void mem_cgroup_uncharge_list(struct list_head *page_list);

  void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
  			bool lrucare);

  struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
  struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

  bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
  			      struct mem_cgroup *root);
  bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);

  extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
  extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
  extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
  extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

  static inline bool mm_match_cgroup(struct mm_struct *mm,
  				   struct mem_cgroup *memcg)
  {
  	struct mem_cgroup *task_memcg;
  	bool match = false;

  	rcu_read_lock();
  	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
  	if (task_memcg)
  		match = mem_cgroup_is_descendant(task_memcg, memcg);
  	rcu_read_unlock();
  	return match;
  }

  extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

  struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
  				   struct mem_cgroup *,
  				   struct mem_cgroup_reclaim_cookie *);
  void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
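
  /*
   * Illustrative sketch: a hypothetical caller (example_count_memcgs) that
   * walks @root's whole subtree with the iterator above.  Passing NULL as
   * @prev starts the walk, a NULL cookie asks for a full (non-reclaim)
   * iteration, and a NULL return value ends it; a walk that is aborted
   * early must call mem_cgroup_iter_break() to release the last reference.
   */
  static inline int example_count_memcgs(struct mem_cgroup *root)
  {
  	struct mem_cgroup *memcg;
  	int nr = 0;

  	for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
  	     memcg = mem_cgroup_iter(root, memcg, NULL))
  		nr++;	/* every memcg in @root's subtree, @root included */
  	return nr;
  }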
  /*
   * For memory reclaim.
   */
  int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
  bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
  int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
  unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
  void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
  extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
  					struct task_struct *p);

  static inline void mem_cgroup_oom_enable(void)
  {
  	WARN_ON(current->memcg_oom.may_oom);
  	current->memcg_oom.may_oom = 1;
  }
  static inline void mem_cgroup_oom_disable(void)
  {
  	WARN_ON(!current->memcg_oom.may_oom);
  	current->memcg_oom.may_oom = 0;
  }
  static inline bool task_in_memcg_oom(struct task_struct *p)
  {
  	return p->memcg_oom.memcg;
  }
  bool mem_cgroup_oom_synchronize(bool wait);

  #ifdef CONFIG_MEMCG_SWAP
  extern int do_swap_account;
  #endif
  
  static inline bool mem_cgroup_disabled(void)
  {
  	if (memory_cgrp_subsys.disabled)
  		return true;
  	return false;
  }
  struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
  void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
  				 enum mem_cgroup_stat_index idx, int val);
  void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
  
  static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
  					    enum mem_cgroup_stat_index idx)
  {
  	mem_cgroup_update_page_stat(memcg, idx, 1);
  }
  static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
  					    enum mem_cgroup_stat_index idx)
  {
  	mem_cgroup_update_page_stat(memcg, idx, -1);
  }
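
  /*
   * Illustrative sketch: a hypothetical caller (example_account_page_mapped)
   * bumping a per-memcg page statistic.  The begin/end pair is what keeps
   * the page's memcg association stable while the counter is adjusted.
   */
  static inline void example_account_page_mapped(struct page *page)
  {
  	struct mem_cgroup *memcg;

  	memcg = mem_cgroup_begin_page_stat(page);
  	if (memcg)	/* NULL when the memory controller is disabled */
  		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
  	mem_cgroup_end_page_stat(memcg);
  }
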
  unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
  						gfp_t gfp_mask,
  						unsigned long *total_scanned);

  void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
  static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
  					     enum vm_event_item idx)
  {
  	if (mem_cgroup_disabled())
  		return;
  	__mem_cgroup_count_vm_event(mm, idx);
  }
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  void mem_cgroup_split_huge_fixup(struct page *head);
  #endif
  #else /* CONFIG_MEMCG */
  struct mem_cgroup;
  static inline void mem_cgroup_events(struct mem_cgroup *memcg,
  				     enum mem_cgroup_events_index idx,
  				     unsigned int nr)
  {
  }
  
  static inline bool mem_cgroup_low(struct mem_cgroup *root,
  				  struct mem_cgroup *memcg)
  {
  	return false;
  }
  static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
  					gfp_t gfp_mask,
  					struct mem_cgroup **memcgp)
  {
  	*memcgp = NULL;
  	return 0;
  }
  static inline void mem_cgroup_commit_charge(struct page *page,
  					    struct mem_cgroup *memcg,
  					    bool lrucare)
  {
  }
  static inline void mem_cgroup_cancel_charge(struct page *page,
  					    struct mem_cgroup *memcg)
  {
  }
  static inline void mem_cgroup_uncharge(struct page *page)
  {
  }
  static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
  {
  }
  static inline void mem_cgroup_migrate(struct page *oldpage,
  				      struct page *newpage,
  				      bool lrucare)
  {
  }
  static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
  						    struct mem_cgroup *memcg)
  {
  	return &zone->lruvec;
  }
  static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
  						    struct zone *zone)
  {
  	return &zone->lruvec;
  }
  static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
  {
  	return NULL;
  }
  static inline bool mm_match_cgroup(struct mm_struct *mm,
  		struct mem_cgroup *memcg)
  {
  	return true;
  }
  static inline bool task_in_mem_cgroup(struct task_struct *task,
  				      const struct mem_cgroup *memcg)
  {
  	return true;
  }
  static inline struct cgroup_subsys_state
  		*mem_cgroup_css(struct mem_cgroup *memcg)
  {
  	return NULL;
  }
  static inline struct mem_cgroup *
  mem_cgroup_iter(struct mem_cgroup *root,
  		struct mem_cgroup *prev,
  		struct mem_cgroup_reclaim_cookie *reclaim)
  {
  	return NULL;
  }
  
  static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
  					 struct mem_cgroup *prev)
  {
  }
  static inline bool mem_cgroup_disabled(void)
  {
  	return true;
  }

  static inline int
  mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
  {
  	return 1;
  }
  static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
  {
  	return true;
  }
  static inline unsigned long
  mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
  {
  	return 0;
  }
  static inline void
  mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
  			      int increment)
  {
  }
  static inline void
  mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
  {
  }
  static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
  {
  	return NULL;
  }
  static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
  {
  }
  static inline void mem_cgroup_oom_enable(void)
  {
  }
  static inline void mem_cgroup_oom_disable(void)
  {
  }
  static inline bool task_in_memcg_oom(struct task_struct *p)
  {
  	return false;
  }
  static inline bool mem_cgroup_oom_synchronize(bool wait)
  {
  	return false;
  }
  static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
  					    enum mem_cgroup_stat_index idx)
  {
  }
  static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
  					    enum mem_cgroup_stat_index idx)
  {
  }
  static inline
  unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
  					    gfp_t gfp_mask,
  					    unsigned long *total_scanned)
  {
  	return 0;
  }
  static inline void mem_cgroup_split_huge_fixup(struct page *head)
  {
  }
  static inline
  void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
  {
  }
  #endif /* CONFIG_MEMCG */

  enum {
  	UNDER_LIMIT,
  	SOFT_LIMIT,
  	OVER_LIMIT,
  };
  
  struct sock;
  #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
  void sock_update_memcg(struct sock *sk);
  void sock_release_memcg(struct sock *sk);
  #else
  static inline void sock_update_memcg(struct sock *sk)
  {
  }
  static inline void sock_release_memcg(struct sock *sk)
  {
  }
  #endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
  
  #ifdef CONFIG_MEMCG_KMEM
  extern struct static_key memcg_kmem_enabled_key;

  extern int memcg_nr_cache_ids;
  extern void memcg_get_cache_ids(void);
  extern void memcg_put_cache_ids(void);
  
  /*
   * Helper macro to loop through all memcg-specific caches. Callers must still
   * check if the cache is valid (it is either valid or NULL).
   * The slab_mutex must be held when looping through those caches.
   */
  #define for_each_memcg_cache_index(_idx)	\
  	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
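
  /*
   * Illustrative sketch: a hypothetical helper (example_count_memcg_caches)
   * iterating the per-memcg cache ids with the macro above.  The lookup
   * callback stands in for whatever translates an index into a cache; the
   * caller is assumed to hold slab_mutex, and every slot may still be NULL.
   */
  static inline int example_count_memcg_caches(struct kmem_cache *(*lookup)(int idx))
  {
  	int idx, nr = 0;

  	for_each_memcg_cache_index(idx)
  		if (lookup(idx))	/* slot may be NULL */
  			nr++;
  	return nr;
  }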

  static inline bool memcg_kmem_enabled(void)
  {
  	return static_key_false(&memcg_kmem_enabled_key);
  }
  bool memcg_kmem_is_active(struct mem_cgroup *memcg);
  /*
   * In general, we'll do everything in our power not to incur any overhead
   * for non-memcg users of the kmem functions. Not even a function call, if we
   * can avoid it.
   *
   * Therefore, we'll inline all those functions so that in the best case, we'll
   * see that kmemcg is off for everybody and proceed quickly.  If it is on,
   * we'll still do most of the flag checking inline. We check a lot of
   * conditions, but because they are pretty simple, they are expected to be
   * fast.
   */
  bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
  					int order);
  void __memcg_kmem_commit_charge(struct page *page,
  				       struct mem_cgroup *memcg, int order);
  void __memcg_kmem_uncharge_pages(struct page *page, int order);
  int memcg_cache_id(struct mem_cgroup *memcg);

  struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
  void __memcg_kmem_put_cache(struct kmem_cache *cachep);

  struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
  int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
  		      unsigned long nr_pages);
  void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);

  /**
   * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
   * @gfp: the gfp allocation flags.
   * @memcg: a pointer to the memcg this was charged against.
   * @order: allocation order.
   *
   * Returns true if the memcg to which the current task belongs can hold this
   * allocation.
   *
   * We return true automatically if this allocation is not to be accounted to
   * any memcg.
   */
  static inline bool
  memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
  {
  	if (!memcg_kmem_enabled())
  		return true;
  	if (gfp & __GFP_NOACCOUNT)
  		return true;
  	/*
  	 * __GFP_NOFAIL allocations will move on even if charging is not
  	 * possible. Therefore we don't even try, and have this allocation
  	 * unaccounted. We could in theory charge it forcibly, but we hope
  	 * those allocations are rare, and won't be worth the trouble.
  	 */
  	if (gfp & __GFP_NOFAIL)
  		return true;
  	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
  		return true;
  
  	/* If the task is dying, just let it go. */
  	if (unlikely(fatal_signal_pending(current)))
  		return true;
  
  	return __memcg_kmem_newpage_charge(gfp, memcg, order);
  }
  
  /**
   * memcg_kmem_uncharge_pages: uncharge pages from memcg
   * @page: pointer to struct page being freed
   * @order: allocation order.
   */
  static inline void
  memcg_kmem_uncharge_pages(struct page *page, int order)
  {
  	if (memcg_kmem_enabled())
  		__memcg_kmem_uncharge_pages(page, order);
  }
  
  /**
   * memcg_kmem_commit_charge: embeds correct memcg in a page
   * @page: pointer to struct page recently allocated
   * @memcg: the memcg structure we charged against
   * @order: allocation order.
   *
   * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
   * failure of the allocation. If @page is NULL, this function will revert the
   * charges. Otherwise, it will commit @page to @memcg.
   */
  static inline void
  memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
  {
  	if (memcg_kmem_enabled() && memcg)
  		__memcg_kmem_commit_charge(page, memcg, order);
  }
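
  /*
   * Illustrative sketch: a hypothetical caller (example_charged_alloc_pages)
   * showing the intended charge -> allocate -> commit sequence for a kmem
   * page.  Committing a NULL page reverts the charge, and
   * memcg_kmem_uncharge_pages() pairs with the eventual free.  alloc_pages()
   * is assumed to come from <linux/gfp.h>.
   */
  static inline struct page *example_charged_alloc_pages(gfp_t gfp, int order)
  {
  	struct mem_cgroup *memcg = NULL;	/* stays NULL if charging is bypassed */
  	struct page *page;

  	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
  		return NULL;			/* the memcg is over its kmem limit */

  	page = alloc_pages(gfp, order);
  	memcg_kmem_commit_charge(page, memcg, order);	/* reverts if !page */
  	return page;
  }
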
  /**
   * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
   * @cachep: the original global kmem cache
   * @gfp: allocation flags.
   *
   * All memory allocated from a per-memcg cache is charged to the owner memcg.
   */
  static __always_inline struct kmem_cache *
  memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
  {
  	if (!memcg_kmem_enabled())
  		return cachep;
  	if (gfp & __GFP_NOACCOUNT)
  		return cachep;
  	if (gfp & __GFP_NOFAIL)
  		return cachep;
  	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
  		return cachep;
  	if (unlikely(fatal_signal_pending(current)))
  		return cachep;
  	return __memcg_kmem_get_cache(cachep);
  }
  
  static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
  {
  	if (memcg_kmem_enabled())
  		__memcg_kmem_put_cache(cachep);
  }
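
  /*
   * Illustrative sketch: a hypothetical slab allocation path
   * (example_charged_cache_alloc) that picks the per-memcg cache for the
   * current task and drops the reference once the object has been
   * allocated.  kmem_cache_alloc() is assumed to come from <linux/slab.h>.
   */
  static inline void *example_charged_cache_alloc(struct kmem_cache *cachep, gfp_t gfp)
  {
  	struct kmem_cache *s = memcg_kmem_get_cache(cachep, gfp);
  	void *obj = kmem_cache_alloc(s, gfp);

  	memcg_kmem_put_cache(s);
  	return obj;
  }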
  
  static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
  {
  	if (!memcg_kmem_enabled())
  		return NULL;
  	return __mem_cgroup_from_kmem(ptr);
  }
  #else
  #define for_each_memcg_cache_index(_idx)	\
  	for (; NULL; )
  static inline bool memcg_kmem_enabled(void)
  {
  	return false;
  }
  static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
  {
  	return false;
  }
  static inline bool
  memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
  {
  	return true;
  }
  
  static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
  {
  }
  
  static inline void
  memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
  {
  }
  
  static inline int memcg_cache_id(struct mem_cgroup *memcg)
  {
  	return -1;
  }
  static inline void memcg_get_cache_ids(void)
  {
  }
  
  static inline void memcg_put_cache_ids(void)
  {
  }
  static inline struct kmem_cache *
  memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
  {
  	return cachep;
  }
  
  static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
  {
  }
  
  static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
  {
  	return NULL;
  }
  #endif /* CONFIG_MEMCG_KMEM */
  #endif /* _LINUX_MEMCONTROL_H */