  /* memcontrol.h - Memory Controller
   *
   * Copyright IBM Corporation, 2007
   * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   *
   * Copyright 2007 OpenVZ SWsoft Inc
   * Author: Pavel Emelianov <xemul@openvz.org>
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   */
  
  #ifndef _LINUX_MEMCONTROL_H
  #define _LINUX_MEMCONTROL_H
  #include <linux/cgroup.h>
  #include <linux/vm_event_item.h>
  #include <linux/hardirq.h>
  #include <linux/jump_label.h>
  #include <linux/page_counter.h>
  #include <linux/vmpressure.h>
  #include <linux/eventfd.h>
  #include <linux/mm.h>
  #include <linux/vmstat.h>
  #include <linux/writeback.h>
  #include <linux/page-flags.h>

  struct mem_cgroup;
  struct page;
  struct mm_struct;
  struct kmem_cache;

  /* Cgroup-specific page state, on top of universal node page state */
  enum memcg_stat_item {
  	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
  	MEMCG_RSS,
  	MEMCG_RSS_HUGE,
  	MEMCG_SWAP,
  	MEMCG_SOCK,
  	/* XXX: why are these zone and not node counters? */
  	MEMCG_KERNEL_STACK_KB,
  	MEMCG_NR_STAT,
  };
  /* Cgroup-specific events, on top of universal VM events */
  enum memcg_event_item {
  	MEMCG_LOW = NR_VM_EVENT_ITEMS,
  	MEMCG_HIGH,
  	MEMCG_MAX,
  	MEMCG_OOM,
  	MEMCG_NR_EVENTS,
  };
  struct mem_cgroup_reclaim_cookie {
  	pg_data_t *pgdat;
  	int priority;
  	unsigned int generation;
  };
  #ifdef CONFIG_MEMCG
  
  #define MEM_CGROUP_ID_SHIFT	16
  #define MEM_CGROUP_ID_MAX	USHRT_MAX
  
  struct mem_cgroup_id {
  	int id;
  	atomic_t ref;
  };
  /*
   * Per memcg event counter is incremented at every pagein/pageout. With THP,
   * it will be incremented by the number of pages. This counter is used to
   * trigger some periodic events. This is straightforward and better
   * than using jiffies etc. to handle periodic memcg events.
   */
  enum mem_cgroup_events_target {
  	MEM_CGROUP_TARGET_THRESH,
  	MEM_CGROUP_TARGET_SOFTLIMIT,
  	MEM_CGROUP_TARGET_NUMAINFO,
  	MEM_CGROUP_NTARGETS,
  };
  struct mem_cgroup_stat_cpu {
  	long count[MEMCG_NR_STAT];
  	unsigned long events[MEMCG_NR_EVENTS];
  	unsigned long nr_page_events;
  	unsigned long targets[MEM_CGROUP_NTARGETS];
  };
  
  struct mem_cgroup_reclaim_iter {
  	struct mem_cgroup *position;
  	/* scan generation, increased every round-trip */
  	unsigned int generation;
  };
  struct lruvec_stat {
  	long count[NR_VM_NODE_STAT_ITEMS];
  };
  /*
   * per-node information in memory controller.
   */
  struct mem_cgroup_per_node {
  	struct lruvec		lruvec;
  	struct lruvec_stat __percpu *lruvec_stat;
  	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
  
  	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];
  
  	struct rb_node		tree_node;	/* RB tree node */
  	unsigned long		usage_in_excess;/* Set to the value by which */
  						/* the soft limit is exceeded */
  	bool			on_tree;
  	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
  						/* use container_of	   */
  };
  struct mem_cgroup_threshold {
  	struct eventfd_ctx *eventfd;
  	unsigned long threshold;
  };
  
  /* For threshold */
  struct mem_cgroup_threshold_ary {
  	/* An array index points to threshold just below or equal to usage. */
  	int current_threshold;
  	/* Size of entries[] */
  	unsigned int size;
  	/* Array of thresholds */
  	struct mem_cgroup_threshold entries[];
  };
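
  /*
   * Example (illustrative values, not from any real caller): with
   * thresholds registered at 4M, 8M and 16M, entries[] is kept sorted
   * ascending by threshold, so at a usage of 10M current_threshold is 1,
   * the index of the highest threshold (8M) not exceeding usage.
   */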
  
  struct mem_cgroup_thresholds {
  	/* Primary thresholds array */
  	struct mem_cgroup_threshold_ary *primary;
  	/*
  	 * Spare threshold array.
  	 * This is needed to make mem_cgroup_unregister_event() "never fail".
  	 * It must be able to store at least primary->size - 1 entries.
  	 */
  	struct mem_cgroup_threshold_ary *spare;
  };
  enum memcg_kmem_state {
  	KMEM_NONE,
  	KMEM_ALLOCATED,
  	KMEM_ONLINE,
  };
  /*
   * The memory controller data structure. The memory controller controls both
   * page cache and RSS per cgroup. We would eventually like to provide
   * statistics based on the statistics developed by Rik Van Riel for clock-pro,
   * to help the administrator determine what knobs to tune.
   */
  struct mem_cgroup {
  	struct cgroup_subsys_state css;
  	/* Private memcg ID. Used to ID objects that outlive the cgroup */
  	struct mem_cgroup_id id;
  	/* Accounted resources */
  	struct page_counter memory;
  	struct page_counter swap;
  
  	/* Legacy consumer-oriented counters */
  	struct page_counter memsw;
  	struct page_counter kmem;
  	struct page_counter tcpmem;
  
  	/* Normal memory consumption range */
  	unsigned long low;
  	unsigned long high;
  	/* Range enforcement for interrupt charges */
  	struct work_struct high_work;
  	unsigned long soft_limit;
  
  	/* vmpressure notifications */
  	struct vmpressure vmpressure;
  	/*
  	 * Should the accounting and control be hierarchical, per subtree?
  	 */
  	bool use_hierarchy;
  
  	/* protected by memcg_oom_lock */
  	bool		oom_lock;
  	int		under_oom;
  
  	int	swappiness;
  	/* OOM-Killer disable */
  	int		oom_kill_disable;
  	/* handle for "memory.events" */
  	struct cgroup_file events_file;
  	/* protect arrays of thresholds */
  	struct mutex thresholds_lock;
  
  	/* thresholds for memory usage. RCU-protected */
  	struct mem_cgroup_thresholds thresholds;
  
  	/* thresholds for mem+swap usage. RCU-protected */
  	struct mem_cgroup_thresholds memsw_thresholds;
  
  	/* For oom notifier event fd */
  	struct list_head oom_notify;
  
  	/*
  	 * Should we move charges of a task when a task is moved into this
  	 * mem_cgroup? And what type of charges should we move?
  	 */
  	unsigned long move_charge_at_immigrate;
  	/*
  	 * set > 0 if pages under this cgroup are moving to other cgroup.
  	 */
  	atomic_t		moving_account;
  	/* taken only while moving_account > 0 */
  	spinlock_t		move_lock;
  	struct task_struct	*move_lock_task;
  	unsigned long		move_lock_flags;
  	/*
  	 * percpu counter.
  	 */
  	struct mem_cgroup_stat_cpu __percpu *stat;

  	unsigned long		socket_pressure;
  
  	/* Legacy tcp memory accounting */
  	bool			tcpmem_active;
  	int			tcpmem_pressure;

  #ifndef CONFIG_SLOB
  	/* Index in the kmem_cache->memcg_params.memcg_caches array */
  	int kmemcg_id;
  	enum memcg_kmem_state kmem_state;
  	struct list_head kmem_caches;
  #endif
  
  	int last_scanned_node;
  #if MAX_NUMNODES > 1
  	nodemask_t	scan_nodes;
  	atomic_t	numainfo_events;
  	atomic_t	numainfo_updating;
  #endif
  
  #ifdef CONFIG_CGROUP_WRITEBACK
  	struct list_head cgwb_list;
  	struct wb_domain cgwb_domain;
  #endif
  
  	/* List of events which userspace want to receive */
  	struct list_head event_list;
  	spinlock_t event_list_lock;
  
  	struct mem_cgroup_per_node *nodeinfo[];
  	/* WARNING: nodeinfo must be the last member here */
  };
  
  extern struct mem_cgroup *root_mem_cgroup;

  static inline bool mem_cgroup_disabled(void)
  {
  	return !cgroup_subsys_enabled(memory_cgrp_subsys);
  }
  static inline void mem_cgroup_event(struct mem_cgroup *memcg,
  				    enum memcg_event_item event)
  {
  	this_cpu_inc(memcg->stat->events[event]);
  	cgroup_file_notify(&memcg->events_file);
  }
  
  bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
  int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
  			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
  			  bool compound);
  void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
  			      bool lrucare, bool compound);
  void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
  		bool compound);
  void mem_cgroup_uncharge(struct page *page);
  void mem_cgroup_uncharge_list(struct list_head *page_list);

  void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

  static inline struct mem_cgroup_per_node *
  mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
  {
  	return memcg->nodeinfo[nid];
  }
  
  /**
   * mem_cgroup_lruvec - get the lru list vector for a node or a memcg
   * @pgdat: pglist_data of the node of the wanted lruvec
   * @memcg: memcg of the wanted lruvec
   *
   * Returns the lru list vector holding pages for a given @pgdat and
   * @memcg. This can be the node lruvec, if the memory controller
   * is disabled.
   */
  static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
  				struct mem_cgroup *memcg)
  {
  	struct mem_cgroup_per_node *mz;
  	struct lruvec *lruvec;
  
  	if (mem_cgroup_disabled()) {
  		lruvec = node_lruvec(pgdat);
  		goto out;
  	}
  	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
  	lruvec = &mz->lruvec;
  out:
  	/*
  	 * Since a node can be onlined after the mem_cgroup was created,
  	 * we have to be prepared to initialize lruvec->pgdat here;
  	 * and if offlined then reonlined, we need to reinitialize it.
  	 */
  	if (unlikely(lruvec->pgdat != pgdat))
  		lruvec->pgdat = pgdat;
  	return lruvec;
  }
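
  /*
   * A minimal usage sketch (hypothetical caller, not part of this header):
   * reclaim code derives the lruvec for a (node, memcg) pair before
   * walking its LRU lists:
   *
   *   struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
   *   unsigned long nr_file = lruvec_page_state(lruvec, NR_FILE_PAGES);
   */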
  struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

  bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
  struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

  static inline
  struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
  {
  	return css ? container_of(css, struct mem_cgroup, css) : NULL;
  }
  #define mem_cgroup_from_counter(counter, member)	\
  	container_of(counter, struct mem_cgroup, member)
  struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
  				   struct mem_cgroup *,
  				   struct mem_cgroup_reclaim_cookie *);
  void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
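
  /*
   * Iteration sketch (hypothetical caller; should_stop() is a made-up
   * predicate): start the walk with a NULL position, feed each result
   * back in as the new position, and call mem_cgroup_iter_break() to
   * drop the reference on the current position when leaving early:
   *
   *   struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
   *
   *   while (iter) {
   *           if (should_stop(iter)) {
   *                   mem_cgroup_iter_break(root, iter);
   *                   break;
   *           }
   *           iter = mem_cgroup_iter(root, iter, NULL);
   *   }
   */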
  int mem_cgroup_scan_tasks(struct mem_cgroup *,
  			  int (*)(struct task_struct *, void *), void *);

  static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
  {
  	if (mem_cgroup_disabled())
  		return 0;
  	return memcg->id.id;
  }
  struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

  static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
  {
  	struct mem_cgroup_per_node *mz;
  
  	if (mem_cgroup_disabled())
  		return NULL;
  
  	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
  	return mz->memcg;
  }
  /**
   * parent_mem_cgroup - find the accounting parent of a memcg
   * @memcg: memcg whose parent to find
   *
   * Returns the parent memcg, or NULL if this is the root or the memory
   * controller is in legacy no-hierarchy mode.
   */
  static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
  {
  	if (!memcg->memory.parent)
  		return NULL;
  	return mem_cgroup_from_counter(memcg->memory.parent, memory);
  }
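
  /*
   * Walking from a memcg towards the root is then a simple loop, as
   * mem_cgroup_under_socket_pressure() below does:
   *
   *   do {
   *           ...
   *   } while ((memcg = parent_mem_cgroup(memcg)));
   */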
  static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
  			      struct mem_cgroup *root)
  {
  	if (root == memcg)
  		return true;
  	if (!root->use_hierarchy)
  		return false;
  	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
  }

  static inline bool mm_match_cgroup(struct mm_struct *mm,
  				   struct mem_cgroup *memcg)
  {
  	struct mem_cgroup *task_memcg;
  	bool match = false;

  	rcu_read_lock();
  	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
  	if (task_memcg)
  		match = mem_cgroup_is_descendant(task_memcg, memcg);
  	rcu_read_unlock();
  	return match;
  }

  struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
  ino_t page_cgroup_ino(struct page *page);

  static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
  {
  	if (mem_cgroup_disabled())
  		return true;
  	return !!(memcg->css.flags & CSS_ONLINE);
  }
  /*
   * For memory reclaim.
   */
  int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
  
  void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
  		int zid, int nr_pages);

  unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
  					   int nid, unsigned int lru_mask);
  static inline
  unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
  {
  	struct mem_cgroup_per_node *mz;
  	unsigned long nr_pages = 0;
  	int zid;

  	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
  	for (zid = 0; zid < MAX_NR_ZONES; zid++)
  		nr_pages += mz->lru_zone_size[zid][lru];
  	return nr_pages;
  }
  
  static inline
  unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
  		enum lru_list lru, int zone_idx)
  {
  	struct mem_cgroup_per_node *mz;
  
  	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
  	return mz->lru_zone_size[zone_idx][lru];
  }
  void mem_cgroup_handle_over_high(void);
  unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
  void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
  				struct task_struct *p);

  static inline void mem_cgroup_oom_enable(void)
  {
  	WARN_ON(current->memcg_may_oom);
  	current->memcg_may_oom = 1;
  }
  static inline void mem_cgroup_oom_disable(void)
  {
  	WARN_ON(!current->memcg_may_oom);
  	current->memcg_may_oom = 0;
  }
  static inline bool task_in_memcg_oom(struct task_struct *p)
  {
  	return p->memcg_in_oom;
  }
  bool mem_cgroup_oom_synchronize(bool wait);

  #ifdef CONFIG_MEMCG_SWAP
  extern int do_swap_account;
  #endif

  struct mem_cgroup *lock_page_memcg(struct page *page);
  void __unlock_page_memcg(struct mem_cgroup *memcg);
  void unlock_page_memcg(struct page *page);

  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
  					     int idx)
  {
  	long val = 0;
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		val += per_cpu(memcg->stat->count[idx], cpu);
  
  	if (val < 0)
  		val = 0;
  
  	return val;
  }
  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void __mod_memcg_state(struct mem_cgroup *memcg,
  				     int idx, int val)
  {
  	if (!mem_cgroup_disabled())
  		__this_cpu_add(memcg->stat->count[idx], val);
  }
  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void mod_memcg_state(struct mem_cgroup *memcg,
  				   int idx, int val)
  {
  	if (!mem_cgroup_disabled())
  		this_cpu_add(memcg->stat->count[idx], val);
  }
  /**
   * mod_memcg_page_state - update page state statistics
   * @page: the page
   * @idx: page state item to account
   * @val: number of pages (positive or negative)
   *
   * The @page must be locked or the caller must use lock_page_memcg()
   * to prevent double accounting when the page is concurrently being
   * moved to another memcg:
   *
   *   lock_page(page) or lock_page_memcg(page)
   *   if (TestClearPageState(page))
   *     mod_memcg_page_state(page, state, -1);
   *   unlock_page(page) or unlock_page_memcg(page)
   *
   * Kernel pages are an exception to this, since they'll never move.
   */
  static inline void __mod_memcg_page_state(struct page *page,
  					  int idx, int val)
  {
  	if (page->mem_cgroup)
  		__mod_memcg_state(page->mem_cgroup, idx, val);
  }
  static inline void mod_memcg_page_state(struct page *page,
  					int idx, int val)
  {
  	if (page->mem_cgroup)
  		mod_memcg_state(page->mem_cgroup, idx, val);
  }
  static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
  					      enum node_stat_item idx)
  {
  	struct mem_cgroup_per_node *pn;
  	long val = 0;
  	int cpu;
  
  	if (mem_cgroup_disabled())
  		return node_page_state(lruvec_pgdat(lruvec), idx);
  
  	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
  	for_each_possible_cpu(cpu)
  		val += per_cpu(pn->lruvec_stat->count[idx], cpu);
  
  	if (val < 0)
  		val = 0;
  
  	return val;
  }
  static inline void __mod_lruvec_state(struct lruvec *lruvec,
  				      enum node_stat_item idx, int val)
  {
  	struct mem_cgroup_per_node *pn;
  
  	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
  	if (mem_cgroup_disabled())
  		return;
  	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
  	__mod_memcg_state(pn->memcg, idx, val);
  	__this_cpu_add(pn->lruvec_stat->count[idx], val);
  }
  
  static inline void mod_lruvec_state(struct lruvec *lruvec,
  				    enum node_stat_item idx, int val)
  {
  	struct mem_cgroup_per_node *pn;
  
  	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
  	if (mem_cgroup_disabled())
  		return;
  	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
  	mod_memcg_state(pn->memcg, idx, val);
  	this_cpu_add(pn->lruvec_stat->count[idx], val);
  }
  
  static inline void __mod_lruvec_page_state(struct page *page,
  					   enum node_stat_item idx, int val)
  {
  	struct mem_cgroup_per_node *pn;
  
  	__mod_node_page_state(page_pgdat(page), idx, val);
  	if (mem_cgroup_disabled() || !page->mem_cgroup)
  		return;
  	__mod_memcg_state(page->mem_cgroup, idx, val);
  	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
  	__this_cpu_add(pn->lruvec_stat->count[idx], val);
  }
  
  static inline void mod_lruvec_page_state(struct page *page,
  					 enum node_stat_item idx, int val)
  {
  	struct mem_cgroup_per_node *pn;
  
  	mod_node_page_state(page_pgdat(page), idx, val);
  	if (mem_cgroup_disabled() || !page->mem_cgroup)
  		return;
  	mod_memcg_state(page->mem_cgroup, idx, val);
  	pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
  	this_cpu_add(pn->lruvec_stat->count[idx], val);
  }
  unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
  						gfp_t gfp_mask,
  						unsigned long *total_scanned);

  static inline void count_memcg_events(struct mem_cgroup *memcg,
  				      enum vm_event_item idx,
  				      unsigned long count)
  {
  	if (!mem_cgroup_disabled())
  		this_cpu_add(memcg->stat->events[idx], count);
  }
  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void count_memcg_page_event(struct page *page,
  					  int idx)
  {
  	if (page->mem_cgroup)
  		count_memcg_events(page->mem_cgroup, idx, 1);
  }
  
  static inline void count_memcg_event_mm(struct mm_struct *mm,
  					enum vm_event_item idx)
  {
  	struct mem_cgroup *memcg;
  	if (mem_cgroup_disabled())
  		return;
  
  	rcu_read_lock();
  	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
  	if (likely(memcg)) {
  		this_cpu_inc(memcg->stat->events[idx]);
  		if (idx == OOM_KILL)
  			cgroup_file_notify(&memcg->events_file);
  	}
  	rcu_read_unlock();
  }
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  void mem_cgroup_split_huge_fixup(struct page *head);
  #endif
  #else /* CONFIG_MEMCG */
  
  #define MEM_CGROUP_ID_SHIFT	0
  #define MEM_CGROUP_ID_MAX	0
  struct mem_cgroup;
  static inline bool mem_cgroup_disabled(void)
  {
  	return true;
  }
  static inline void mem_cgroup_event(struct mem_cgroup *memcg,
  				    enum memcg_event_item event)
  {
  }
  
  static inline bool mem_cgroup_low(struct mem_cgroup *root,
  				  struct mem_cgroup *memcg)
  {
  	return false;
  }
  static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
  					gfp_t gfp_mask,
  					struct mem_cgroup **memcgp,
  					bool compound)
  {
  	*memcgp = NULL;
  	return 0;
  }
  static inline void mem_cgroup_commit_charge(struct page *page,
  					    struct mem_cgroup *memcg,
  					    bool lrucare, bool compound)
  {
  }
  static inline void mem_cgroup_cancel_charge(struct page *page,
  					    struct mem_cgroup *memcg,
  					    bool compound)
  {
  }
  static inline void mem_cgroup_uncharge(struct page *page)
  {
  }
  static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
  {
  }
  static inline void mem_cgroup_migrate(struct page *old, struct page *new)
  {
  }
  static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
  				struct mem_cgroup *memcg)
  {
  	return node_lruvec(pgdat);
  }
  static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
  						    struct pglist_data *pgdat)
  {
  	return &pgdat->lruvec;
  }
  static inline bool mm_match_cgroup(struct mm_struct *mm,
  		struct mem_cgroup *memcg)
  {
  	return true;
  }
  static inline bool task_in_mem_cgroup(struct task_struct *task,
  				      const struct mem_cgroup *memcg)
  {
  	return true;
  }
  static inline struct mem_cgroup *
  mem_cgroup_iter(struct mem_cgroup *root,
  		struct mem_cgroup *prev,
  		struct mem_cgroup_reclaim_cookie *reclaim)
  {
  	return NULL;
  }
  
  static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
  					 struct mem_cgroup *prev)
  {
  }
  static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
  		int (*fn)(struct task_struct *, void *), void *arg)
  {
  	return 0;
  }
  static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
  {
  	return 0;
  }
  
  static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
  {
  	WARN_ON_ONCE(id);
  	/* XXX: This should always return root_mem_cgroup */
  	return NULL;
  }

  static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
  {
  	return NULL;
  }
  static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
  {
  	return true;
  }
  static inline unsigned long
  mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
  {
  	return 0;
  }
  static inline
  unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
  		enum lru_list lru, int zone_idx)
  {
  	return 0;
  }

  static inline unsigned long
  mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
  			     int nid, unsigned int lru_mask)
  {
  	return 0;
  }
  static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
  {
  	return 0;
  }
  static inline void
  mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
  {
  }
  static inline struct mem_cgroup *lock_page_memcg(struct page *page)
  {
  	return NULL;
  }
  
  static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
  {
  }
  static inline void unlock_page_memcg(struct page *page)
  {
  }
  static inline void mem_cgroup_handle_over_high(void)
  {
  }
  static inline void mem_cgroup_oom_enable(void)
  {
  }
  static inline void mem_cgroup_oom_disable(void)
  {
  }
  static inline bool task_in_memcg_oom(struct task_struct *p)
  {
  	return false;
  }
  static inline bool mem_cgroup_oom_synchronize(bool wait)
  {
  	return false;
  }
  static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
  					     int idx)
  {
  	return 0;
  }
  static inline void __mod_memcg_state(struct mem_cgroup *memcg,
  				     int idx,
  				     int nr)
  {
  }
  static inline void mod_memcg_state(struct mem_cgroup *memcg,
  				   int idx,
  				   int nr)
  {
  }
  static inline void __mod_memcg_page_state(struct page *page,
  					  int idx,
  					  int nr)
  {
  }
  static inline void mod_memcg_page_state(struct page *page,
  					int idx,
  					int nr)
  {
  }
  static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
  					      enum node_stat_item idx)
  {
  	return node_page_state(lruvec_pgdat(lruvec), idx);
  }
  static inline void __mod_lruvec_state(struct lruvec *lruvec,
  				      enum node_stat_item idx, int val)
  {
  	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
  }
  
  static inline void mod_lruvec_state(struct lruvec *lruvec,
  				    enum node_stat_item idx, int val)
  {
  	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
  }
  
  static inline void __mod_lruvec_page_state(struct page *page,
  					   enum node_stat_item idx, int val)
  {
  	__mod_node_page_state(page_pgdat(page), idx, val);
  }
  
  static inline void mod_lruvec_page_state(struct page *page,
  					 enum node_stat_item idx, int val)
  {
  	mod_node_page_state(page_pgdat(page), idx, val);
  }
  static inline
  unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
  					    gfp_t gfp_mask,
  					    unsigned long *total_scanned)
  {
  	return 0;
  }
  static inline void mem_cgroup_split_huge_fixup(struct page *head)
  {
  }
  static inline void count_memcg_events(struct mem_cgroup *memcg,
  				      enum vm_event_item idx,
  				      unsigned long count)
  {
  }
  
  static inline void count_memcg_page_event(struct page *page,
  					  int idx)
  {
  }
  static inline
  void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
  {
  }
  #endif /* CONFIG_MEMCG */

  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void __inc_memcg_state(struct mem_cgroup *memcg,
  				     int idx)
  {
  	__mod_memcg_state(memcg, idx, 1);
  }
  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void __dec_memcg_state(struct mem_cgroup *memcg,
  				     int idx)
  {
  	__mod_memcg_state(memcg, idx, -1);
  }
  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void __inc_memcg_page_state(struct page *page,
  					  int idx)
  {
  	__mod_memcg_page_state(page, idx, 1);
  }
  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void __dec_memcg_page_state(struct page *page,
  					  int idx)
  {
  	__mod_memcg_page_state(page, idx, -1);
  }
  
  static inline void __inc_lruvec_state(struct lruvec *lruvec,
  				      enum node_stat_item idx)
  {
  	__mod_lruvec_state(lruvec, idx, 1);
  }
  
  static inline void __dec_lruvec_state(struct lruvec *lruvec,
  				      enum node_stat_item idx)
  {
  	__mod_lruvec_state(lruvec, idx, -1);
  }
  
  static inline void __inc_lruvec_page_state(struct page *page,
  					   enum node_stat_item idx)
  {
  	__mod_lruvec_page_state(page, idx, 1);
  }
  
  static inline void __dec_lruvec_page_state(struct page *page,
  					   enum node_stat_item idx)
  {
  	__mod_lruvec_page_state(page, idx, -1);
  }
  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void inc_memcg_state(struct mem_cgroup *memcg,
  				   int idx)
  {
  	mod_memcg_state(memcg, idx, 1);
  }
  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void dec_memcg_state(struct mem_cgroup *memcg,
  				   int idx)
  {
  	mod_memcg_state(memcg, idx, -1);
  }
  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void inc_memcg_page_state(struct page *page,
  					int idx)
  {
  	mod_memcg_page_state(page, idx, 1);
  }
  /* idx can be of type enum memcg_stat_item or node_stat_item */
  static inline void dec_memcg_page_state(struct page *page,
  					int idx)
  {
  	mod_memcg_page_state(page, idx, -1);
  }
  
  static inline void inc_lruvec_state(struct lruvec *lruvec,
  				    enum node_stat_item idx)
  {
  	mod_lruvec_state(lruvec, idx, 1);
  }
  
  static inline void dec_lruvec_state(struct lruvec *lruvec,
  				    enum node_stat_item idx)
  {
  	mod_lruvec_state(lruvec, idx, -1);
  }
  
  static inline void inc_lruvec_page_state(struct page *page,
  					 enum node_stat_item idx)
  {
  	mod_lruvec_page_state(page, idx, 1);
  }
  
  static inline void dec_lruvec_page_state(struct page *page,
  					 enum node_stat_item idx)
  {
  	mod_lruvec_page_state(page, idx, -1);
  }
  #ifdef CONFIG_CGROUP_WRITEBACK

  struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
  struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
  void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
  			 unsigned long *pheadroom, unsigned long *pdirty,
  			 unsigned long *pwriteback);
  
  #else	/* CONFIG_CGROUP_WRITEBACK */
  
  static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
  {
  	return NULL;
  }
  static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
  				       unsigned long *pfilepages,
  				       unsigned long *pheadroom,
  				       unsigned long *pdirty,
  				       unsigned long *pwriteback)
  {
  }
  #endif	/* CONFIG_CGROUP_WRITEBACK */

  struct sock;
  bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
  void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
  #ifdef CONFIG_MEMCG
  extern struct static_key_false memcg_sockets_enabled_key;
  #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
  void mem_cgroup_sk_alloc(struct sock *sk);
  void mem_cgroup_sk_free(struct sock *sk);
  static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
  {
  	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
  		return true;
  	do {
  		if (time_before(jiffies, memcg->socket_pressure))
  			return true;
  	} while ((memcg = parent_mem_cgroup(memcg)));
  	return false;
  }
  #else
  #define mem_cgroup_sockets_enabled 0
  static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
  static inline void mem_cgroup_sk_free(struct sock *sk) { }
  static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
  {
  	return false;
  }
  #endif

  struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
  void memcg_kmem_put_cache(struct kmem_cache *cachep);
  int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
  			    struct mem_cgroup *memcg);
  int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
  void memcg_kmem_uncharge(struct page *page, int order);
  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
  extern struct static_key_false memcg_kmem_enabled_key;
  extern struct workqueue_struct *memcg_kmem_cache_wq;

  extern int memcg_nr_cache_ids;
  void memcg_get_cache_ids(void);
  void memcg_put_cache_ids(void);
  
  /*
   * Helper macro to loop through all memcg-specific caches. Callers must still
   * check if the cache is valid (it is either valid or NULL).
   * The slab_mutex must be held when looping through those caches.
   */
  #define for_each_memcg_cache_index(_idx)	\
  	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
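
  /*
   * Usage sketch, assuming slab_mutex is held by the caller (note that
   * cache_from_memcg_idx() lives in mm/slab.h, not in this header):
   *
   *   int idx;
   *
   *   for_each_memcg_cache_index(idx) {
   *           struct kmem_cache *c = cache_from_memcg_idx(cachep, idx);
   *
   *           if (!c)
   *                   continue;
   *           ...
   *   }
   */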

  static inline bool memcg_kmem_enabled(void)
  {
  	return static_branch_unlikely(&memcg_kmem_enabled_key);
  }
  
  /*
   * Helper for accessing a memcg's index. It will be used as an index in the
   * child cache array in kmem_cache, and also to derive its name. This function
   * will return -1 when this is not a kmem-limited memcg.
   */
  static inline int memcg_cache_id(struct mem_cgroup *memcg)
  {
  	return memcg ? memcg->kmemcg_id : -1;
  }

  #else
  #define for_each_memcg_cache_index(_idx)	\
  	for (; NULL; )
  static inline bool memcg_kmem_enabled(void)
  {
  	return false;
  }
  static inline int memcg_cache_id(struct mem_cgroup *memcg)
  {
  	return -1;
  }
  static inline void memcg_get_cache_ids(void)
  {
  }
  
  static inline void memcg_put_cache_ids(void)
  {
  }
  #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
  #endif /* _LINUX_MEMCONTROL_H */