Blame view

mm/memcontrol.c 141 KB
8cdea7c05   Balbir Singh   Memory controller...
1
2
3
4
5
  /* memcontrol.c - Memory Controller
   *
   * Copyright IBM Corporation, 2007
   * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   *
78fb74669   Pavel Emelianov   Memory controller...
6
7
8
   * Copyright 2007 OpenVZ SWsoft Inc
   * Author: Pavel Emelianov <xemul@openvz.org>
   *
2e72b6347   Kirill A. Shutemov   memcg: implement ...
9
10
11
12
   * Memory thresholds
   * Copyright (C) 2009 Nokia Corporation
   * Author: Kirill A. Shutemov
   *
8cdea7c05   Balbir Singh   Memory controller...
13
14
15
16
17
18
19
20
21
22
23
24
25
26
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   */
  
  #include <linux/res_counter.h>
  #include <linux/memcontrol.h>
  #include <linux/cgroup.h>
78fb74669   Pavel Emelianov   Memory controller...
27
  #include <linux/mm.h>
4ffef5fef   Daisuke Nishimura   memcg: move charg...
28
  #include <linux/hugetlb.h>
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
29
  #include <linux/pagemap.h>
d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
30
  #include <linux/smp.h>
8a9f3ccd2   Balbir Singh   Memory controller...
31
  #include <linux/page-flags.h>
66e1707bc   Balbir Singh   Memory controller...
32
  #include <linux/backing-dev.h>
8a9f3ccd2   Balbir Singh   Memory controller...
33
34
  #include <linux/bit_spinlock.h>
  #include <linux/rcupdate.h>
e222432bf   Balbir Singh   memcg: show memcg...
35
  #include <linux/limits.h>
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
36
  #include <linux/mutex.h>
f64c3f549   Balbir Singh   memory controller...
37
  #include <linux/rbtree.h>
b6ac57d50   Balbir Singh   memcgroup: move m...
38
  #include <linux/slab.h>
66e1707bc   Balbir Singh   Memory controller...
39
  #include <linux/swap.h>
024914477   Daisuke Nishimura   memcg: move charg...
40
  #include <linux/swapops.h>
66e1707bc   Balbir Singh   Memory controller...
41
  #include <linux/spinlock.h>
2e72b6347   Kirill A. Shutemov   memcg: implement ...
42
43
  #include <linux/eventfd.h>
  #include <linux/sort.h>
66e1707bc   Balbir Singh   Memory controller...
44
  #include <linux/fs.h>
d2ceb9b7d   KAMEZAWA Hiroyuki   memory cgroup enh...
45
  #include <linux/seq_file.h>
333279487   KAMEZAWA Hiroyuki   memcgroup: use vm...
46
  #include <linux/vmalloc.h>
b69408e88   Christoph Lameter   vmscan: Use an in...
47
  #include <linux/mm_inline.h>
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
48
  #include <linux/page_cgroup.h>
cdec2e426   KAMEZAWA Hiroyuki   memcg: coalesce c...
49
  #include <linux/cpu.h>
158e0a2d1   KAMEZAWA Hiroyuki   memcg: use find_l...
50
  #include <linux/oom.h>
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
51
  #include "internal.h"
8cdea7c05   Balbir Singh   Memory controller...
52

8697d3319   Balbir Singh   Memory controller...
53
  #include <asm/uaccess.h>
cc8e970c3   KOSAKI Motohiro   memcg: add mm_vms...
54
  #include <trace/events/vmscan.h>
a181b0e88   KAMEZAWA Hiroyuki   memcg: make globa...
55
  struct cgroup_subsys mem_cgroup_subsys __read_mostly;
a181b0e88   KAMEZAWA Hiroyuki   memcg: make globa...
56
  #define MEM_CGROUP_RECLAIM_RETRIES	5
4b3bde4c9   Balbir Singh   memcg: remove the...
57
  struct mem_cgroup *root_mem_cgroup __read_mostly;
8cdea7c05   Balbir Singh   Memory controller...
58

c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
59
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
338c84310   Li Zefan   memcg: remove som...
60
  /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
61
  int do_swap_account __read_mostly;
a42c390cf   Michal Hocko   cgroups: make swa...
62
63
64
65
66
67
68
  
  /* for remembering the boot option */
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
  static int really_do_swap_account __initdata = 1;
  #else
  static int really_do_swap_account __initdata = 0;
  #endif
c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
69
70
71
  #else
  #define do_swap_account		(0)
  #endif
8cdea7c05   Balbir Singh   Memory controller...
72
  /*
d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
73
74
75
76
77
78
79
   * Statistics for memory cgroup.
   */
  enum mem_cgroup_stat_index {
  	/*
  	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
  	 */
  	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
d69b042f3   Balbir Singh   memcg: add file-b...
80
  	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
d8046582d   KAMEZAWA Hiroyuki   memcg: make memcg...
81
  	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
0c3e73e84   Balbir Singh   memcg: improve re...
82
  	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
83
  	MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
32047e2a8   KAMEZAWA Hiroyuki   memcg: avoid lock...
84
  	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */
d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
85
86
  	MEM_CGROUP_STAT_NSTATS,
  };
e9f8974f2   Johannes Weiner   memcg: break out ...
87
88
89
90
  enum mem_cgroup_events_index {
  	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
  	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
  	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
456f998ec   Ying Han   memcg: add the pa...
91
92
  	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
  	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
e9f8974f2   Johannes Weiner   memcg: break out ...
93
94
  	MEM_CGROUP_EVENTS_NSTATS,
  };
7a159cc9d   Johannes Weiner   memcg: use native...
95
96
97
98
99
100
101
102
103
  /*
   * Per memcg event counter is incremented at every pagein/pageout. With THP,
   * it will be incremented by the number of pages. This counter is used to
   * trigger some periodic events. This is straightforward and better than
   * using jiffies etc. to handle periodic memcg events.
   */
  enum mem_cgroup_events_target {
  	MEM_CGROUP_TARGET_THRESH,
  	MEM_CGROUP_TARGET_SOFTLIMIT,
453a9bf34   KAMEZAWA Hiroyuki   memcg: fix numa s...
104
  	MEM_CGROUP_TARGET_NUMAINFO,
7a159cc9d   Johannes Weiner   memcg: use native...
105
106
107
108
  	MEM_CGROUP_NTARGETS,
  };
  #define THRESHOLDS_EVENTS_TARGET (128)
  #define SOFTLIMIT_EVENTS_TARGET (1024)
453a9bf34   KAMEZAWA Hiroyuki   memcg: fix numa s...
109
  #define NUMAINFO_EVENTS_TARGET	(1024)
e9f8974f2   Johannes Weiner   memcg: break out ...
110

d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
111
  struct mem_cgroup_stat_cpu {
7a159cc9d   Johannes Weiner   memcg: use native...
112
  	long count[MEM_CGROUP_STAT_NSTATS];
e9f8974f2   Johannes Weiner   memcg: break out ...
113
  	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
7a159cc9d   Johannes Weiner   memcg: use native...
114
  	unsigned long targets[MEM_CGROUP_NTARGETS];
d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
115
  };
d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
116
  /*
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
117
118
   * per-zone information in memory controller.
   */
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
119
  struct mem_cgroup_per_zone {
072c56c13   KAMEZAWA Hiroyuki   per-zone and recl...
120
121
122
  	/*
  	 * spin_lock to protect the per cgroup LRU
  	 */
b69408e88   Christoph Lameter   vmscan: Use an in...
123
124
  	struct list_head	lists[NR_LRU_LISTS];
  	unsigned long		count[NR_LRU_LISTS];
3e2f41f1f   KOSAKI Motohiro   memcg: add zone_r...
125
126
  
  	struct zone_reclaim_stat reclaim_stat;
f64c3f549   Balbir Singh   memory controller...
127
128
129
130
  	struct rb_node		tree_node;	/* RB tree node */
  	unsigned long long	usage_in_excess;/* Set to the value by which */
  						/* the soft limit is exceeded*/
  	bool			on_tree;
4e4169535   Balbir Singh   memory controller...
131
132
  	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
  						/* use container_of	   */
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
133
134
135
136
137
138
139
140
141
142
143
144
145
  };
  /* Macro for accessing counter */
  #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
  
  struct mem_cgroup_per_node {
  	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
  };
  
  struct mem_cgroup_lru_info {
  	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
  };
  
  /*
f64c3f549   Balbir Singh   memory controller...
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
   * Cgroups above their limits are maintained in a RB-Tree, independent of
   * their hierarchy representation
   */
  
  struct mem_cgroup_tree_per_zone {
  	struct rb_root rb_root;
  	spinlock_t lock;
  };
  
  struct mem_cgroup_tree_per_node {
  	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
  };
  
  struct mem_cgroup_tree {
  	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
  };
  
  static struct mem_cgroup_tree soft_limit_tree __read_mostly;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
164
165
166
167
  struct mem_cgroup_threshold {
  	struct eventfd_ctx *eventfd;
  	u64 threshold;
  };
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
168
  /* For threshold */
2e72b6347   Kirill A. Shutemov   memcg: implement ...
169
170
  struct mem_cgroup_threshold_ary {
  	/* An array index points to threshold just below usage. */
5407a5625   Phil Carmody   mm: remove unnece...
171
  	int current_threshold;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
172
173
174
175
176
  	/* Size of entries[] */
  	unsigned int size;
  	/* Array of thresholds */
  	struct mem_cgroup_threshold entries[0];
  };
2c488db27   Kirill A. Shutemov   memcg: clean up m...
177
178
179
180
181
182
183
184
185
186
187
  
  struct mem_cgroup_thresholds {
  	/* Primary thresholds array */
  	struct mem_cgroup_threshold_ary *primary;
  	/*
  	 * Spare threshold array.
  	 * This is needed to make mem_cgroup_unregister_event() "never fail".
  	 * It must be able to store at least primary->size - 1 entries.
  	 */
  	struct mem_cgroup_threshold_ary *spare;
  };
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
188
189
190
191
192
  /* for OOM */
  struct mem_cgroup_eventfd_list {
  	struct list_head list;
  	struct eventfd_ctx *eventfd;
  };
2e72b6347   Kirill A. Shutemov   memcg: implement ...
193

2e72b6347   Kirill A. Shutemov   memcg: implement ...
194
  static void mem_cgroup_threshold(struct mem_cgroup *mem);
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
195
  static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
2e72b6347   Kirill A. Shutemov   memcg: implement ...
196

f64c3f549   Balbir Singh   memory controller...
197
  /*
8cdea7c05   Balbir Singh   Memory controller...
198
199
200
201
202
203
   * The memory controller data structure. The memory controller controls both
   * page cache and RSS per cgroup. We would eventually like to provide
   * statistics based on the statistics developed by Rik Van Riel for clock-pro,
   * to help the administrator determine what knobs to tune.
   *
   * TODO: Add a water mark for the memory controller. Reclaim will begin when
8a9f3ccd2   Balbir Singh   Memory controller...
204
205
206
   * we hit the water mark. Maybe even add a low water mark, such that
   * no reclaim occurs from a cgroup at its low water mark; this is
   * a feature that will be implemented much later in the future.
8cdea7c05   Balbir Singh   Memory controller...
207
208
209
210
211
212
213
   */
  struct mem_cgroup {
  	struct cgroup_subsys_state css;
  	/*
  	 * the counter to account for memory usage
  	 */
  	struct res_counter res;
78fb74669   Pavel Emelianov   Memory controller...
214
  	/*
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
215
216
217
218
  	 * the counter to account for mem+swap usage.
  	 */
  	struct res_counter memsw;
  	/*
78fb74669   Pavel Emelianov   Memory controller...
219
220
  	 * Per cgroup active and inactive list, similar to the
  	 * per zone LRU lists.
78fb74669   Pavel Emelianov   Memory controller...
221
  	 */
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
222
  	struct mem_cgroup_lru_info info;
6d61ef409   Balbir Singh   memcg: memory cgr...
223
  	/*
af901ca18   André Goddard Rosa   tree-wide: fix as...
224
  	 * While reclaiming in a hierarchy, we cache the last child we
04046e1a0   KAMEZAWA Hiroyuki   memcg: use CSS ID
225
  	 * reclaimed from.
6d61ef409   Balbir Singh   memcg: memory cgr...
226
  	 */
04046e1a0   KAMEZAWA Hiroyuki   memcg: use CSS ID
227
  	int last_scanned_child;
889976dbc   Ying Han   memcg: reclaim me...
228
229
230
  	int last_scanned_node;
  #if MAX_NUMNODES > 1
  	nodemask_t	scan_nodes;
453a9bf34   KAMEZAWA Hiroyuki   memcg: fix numa s...
231
232
  	atomic_t	numainfo_events;
  	atomic_t	numainfo_updating;
889976dbc   Ying Han   memcg: reclaim me...
233
  #endif
18f59ea7d   Balbir Singh   memcg: memory cgr...
234
235
236
237
  	/*
  	 * Should the accounting and control be hierarchical, per subtree?
  	 */
  	bool use_hierarchy;
79dfdaccd   Michal Hocko   memcg: make oom_l...
238
239
240
  
  	bool		oom_lock;
  	atomic_t	under_oom;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
241
  	atomic_t	refcnt;
14797e236   KOSAKI Motohiro   memcg: add inacti...
242

1f4c025b5   KAMEZAWA Hiroyuki   memcg: export mem...
243
  	int	swappiness;
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
244
245
  	/* OOM-Killer disable */
  	int		oom_kill_disable;
a7885eb8a   KOSAKI Motohiro   memcg: swappiness
246

22a668d7c   KAMEZAWA Hiroyuki   memcg: fix behavi...
247
248
  	/* set when res.limit == memsw.limit */
  	bool		memsw_is_minimum;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
249
250
251
252
  	/* protect arrays of thresholds */
  	struct mutex thresholds_lock;
  
  	/* thresholds for memory usage. RCU-protected */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
253
  	struct mem_cgroup_thresholds thresholds;
907860ed3   Kirill A. Shutemov   cgroups: make cft...
254

2e72b6347   Kirill A. Shutemov   memcg: implement ...
255
  	/* thresholds for mem+swap usage. RCU-protected */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
256
  	struct mem_cgroup_thresholds memsw_thresholds;
907860ed3   Kirill A. Shutemov   cgroups: make cft...
257

9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
258
259
  	/* For oom notifier event fd */
  	struct list_head oom_notify;
185efc0f9   Johannes Weiner   memcg: Revert "me...
260

d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
261
  	/*
7dc74be03   Daisuke Nishimura   memcg: add interf...
262
263
264
265
  	 * Should we move charges of a task when a task is moved into this
  	 * mem_cgroup ? And what type of charges should we move ?
  	 */
  	unsigned long 	move_charge_at_immigrate;
7dc74be03   Daisuke Nishimura   memcg: add interf...
266
  	/*
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
267
  	 * percpu counter.
d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
268
  	 */
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
269
  	struct mem_cgroup_stat_cpu *stat;
711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
270
271
272
273
274
275
  	/*
  	 * used when a cpu is offlined or other synchronizations
  	 * See mem_cgroup_read_stat().
  	 */
  	struct mem_cgroup_stat_cpu nocpu_base;
  	spinlock_t pcp_counter_lock;
8cdea7c05   Balbir Singh   Memory controller...
276
  };
7dc74be03   Daisuke Nishimura   memcg: add interf...
277
278
279
280
281
282
  /* Stuff for moving charges at task migration. */
  /*
   * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
   * left-shifted bitmap of these types.
   */
  enum move_type {
4ffef5fef   Daisuke Nishimura   memcg: move charg...
283
  	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
87946a722   Daisuke Nishimura   memcg: move charg...
284
  	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
7dc74be03   Daisuke Nishimura   memcg: add interf...
285
286
  	NR_MOVE_TYPE,
  };
4ffef5fef   Daisuke Nishimura   memcg: move charg...
287
288
  /* "mc" and its members are protected by cgroup_mutex */
  static struct move_charge_struct {
b1dd693e5   Daisuke Nishimura   memcg: avoid dead...
289
  	spinlock_t	  lock; /* for from, to */
4ffef5fef   Daisuke Nishimura   memcg: move charg...
290
291
292
  	struct mem_cgroup *from;
  	struct mem_cgroup *to;
  	unsigned long precharge;
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
293
  	unsigned long moved_charge;
483c30b51   Daisuke Nishimura   memcg: improve pe...
294
  	unsigned long moved_swap;
8033b97c9   Daisuke Nishimura   memcg: avoid oom ...
295
296
297
  	struct task_struct *moving_task;	/* a task moving charges */
  	wait_queue_head_t waitq;		/* a waitq for other context */
  } mc = {
2bd9bb206   KAMEZAWA Hiroyuki   memcg: clean up w...
298
  	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
8033b97c9   Daisuke Nishimura   memcg: avoid oom ...
299
300
  	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
  };
4ffef5fef   Daisuke Nishimura   memcg: move charg...
301

90254a658   Daisuke Nishimura   memcg: clean up m...
302
303
304
305
306
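  /*
   * move_anon()/move_file(): did the destination cgroup ask for this type of
   * charge to be moved along with the task? (see move_charge_at_immigrate)
   */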
  static bool move_anon(void)
  {
  	return test_bit(MOVE_CHARGE_TYPE_ANON,
  					&mc.to->move_charge_at_immigrate);
  }
87946a722   Daisuke Nishimura   memcg: move charg...
307
308
309
310
311
  static bool move_file(void)
  {
  	return test_bit(MOVE_CHARGE_TYPE_FILE,
  					&mc.to->move_charge_at_immigrate);
  }
4e4169535   Balbir Singh   memory controller...
312
313
314
315
316
317
  /*
   * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
   * limit reclaim to prevent infinite loops, if they ever occur.
   */
  #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
  #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)
217bc3194   KAMEZAWA Hiroyuki   memory cgroup enh...
318
319
320
  enum charge_type {
  	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
  	MEM_CGROUP_CHARGE_TYPE_MAPPED,
4f98a2fee   Rik van Riel   vmscan: split LRU...
321
  	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
c05555b57   KAMEZAWA Hiroyuki   memcg: atomic ops...
322
  	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
323
  	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
8a9478ca7   KAMEZAWA Hiroyuki   memcg: fix swap a...
324
  	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
c05555b57   KAMEZAWA Hiroyuki   memcg: atomic ops...
325
326
  	NR_CHARGE_TYPE,
  };
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
327
328
329
  /* for encoding cft->private value on file */
  #define _MEM			(0)
  #define _MEMSWAP		(1)
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
330
  #define _OOM_TYPE		(2)
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
331
332
333
  #define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
  #define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
  #define MEMFILE_ATTR(val)	((val) & 0xffff)
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
334
335
  /* Used for OOM notifier */
  #define OOM_CONTROL		(0)
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
336

75822b449   Balbir Singh   memory controller...
337
338
339
340
341
342
343
  /*
   * Reclaim flags for mem_cgroup_hierarchical_reclaim
   */
  #define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
  #define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
  #define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
  #define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
4e4169535   Balbir Singh   memory controller...
344
345
  #define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
  #define MEM_CGROUP_RECLAIM_SOFT		(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
75822b449   Balbir Singh   memory controller...
346

8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
347
348
  static void mem_cgroup_get(struct mem_cgroup *mem);
  static void mem_cgroup_put(struct mem_cgroup *mem);
7bcc1bb12   Daisuke Nishimura   memcg: get/put pa...
349
  static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
26fe61684   KAMEZAWA Hiroyuki   memcg: fix percpu...
350
  static void drain_all_stock_async(struct mem_cgroup *mem);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
351

f64c3f549   Balbir Singh   memory controller...
352
353
354
355
356
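  /* Return @mem's per-zone information for the given node/zone pair. */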
  static struct mem_cgroup_per_zone *
  mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
  {
  	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
  }
d324236b3   Wu Fengguang   memcg: add access...
357
358
359
360
  struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
  {
  	return &mem->css;
  }
f64c3f549   Balbir Singh   memory controller...
361
  static struct mem_cgroup_per_zone *
97a6c37b3   Johannes Weiner   memcg: change pag...
362
  page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
f64c3f549   Balbir Singh   memory controller...
363
  {
97a6c37b3   Johannes Weiner   memcg: change pag...
364
365
  	int nid = page_to_nid(page);
  	int zid = page_zonenum(page);
f64c3f549   Balbir Singh   memory controller...
366

f64c3f549   Balbir Singh   memory controller...
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
  	return mem_cgroup_zoneinfo(mem, nid, zid);
  }
  
  static struct mem_cgroup_tree_per_zone *
  soft_limit_tree_node_zone(int nid, int zid)
  {
  	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
  }
  
  static struct mem_cgroup_tree_per_zone *
  soft_limit_tree_from_page(struct page *page)
  {
  	int nid = page_to_nid(page);
  	int zid = page_zonenum(page);
  
  	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
  }
  
  static void
4e4169535   Balbir Singh   memory controller...
386
  __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
f64c3f549   Balbir Singh   memory controller...
387
  				struct mem_cgroup_per_zone *mz,
ef8745c1e   KAMEZAWA Hiroyuki   memcg: reduce che...
388
389
  				struct mem_cgroup_tree_per_zone *mctz,
  				unsigned long long new_usage_in_excess)
f64c3f549   Balbir Singh   memory controller...
390
391
392
393
394
395
396
  {
  	struct rb_node **p = &mctz->rb_root.rb_node;
  	struct rb_node *parent = NULL;
  	struct mem_cgroup_per_zone *mz_node;
  
  	if (mz->on_tree)
  		return;
ef8745c1e   KAMEZAWA Hiroyuki   memcg: reduce che...
397
398
399
  	mz->usage_in_excess = new_usage_in_excess;
  	if (!mz->usage_in_excess)
  		return;
f64c3f549   Balbir Singh   memory controller...
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
  	while (*p) {
  		parent = *p;
  		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
  					tree_node);
  		if (mz->usage_in_excess < mz_node->usage_in_excess)
  			p = &(*p)->rb_left;
  		/*
  		 * We can't avoid mem cgroups that are over their soft
  		 * limit by the same amount
  		 */
  		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
  			p = &(*p)->rb_right;
  	}
  	rb_link_node(&mz->tree_node, parent, p);
  	rb_insert_color(&mz->tree_node, &mctz->rb_root);
  	mz->on_tree = true;
4e4169535   Balbir Singh   memory controller...
416
417
418
419
420
421
422
423
424
425
426
427
428
429
  }
  
  static void
  __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
  				struct mem_cgroup_per_zone *mz,
  				struct mem_cgroup_tree_per_zone *mctz)
  {
  	if (!mz->on_tree)
  		return;
  	rb_erase(&mz->tree_node, &mctz->rb_root);
  	mz->on_tree = false;
  }
  
  static void
f64c3f549   Balbir Singh   memory controller...
430
431
432
433
434
  mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
  				struct mem_cgroup_per_zone *mz,
  				struct mem_cgroup_tree_per_zone *mctz)
  {
  	spin_lock(&mctz->lock);
4e4169535   Balbir Singh   memory controller...
435
  	__mem_cgroup_remove_exceeded(mem, mz, mctz);
f64c3f549   Balbir Singh   memory controller...
436
437
  	spin_unlock(&mctz->lock);
  }
f64c3f549   Balbir Singh   memory controller...
438
439
440
  
  static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
  {
ef8745c1e   KAMEZAWA Hiroyuki   memcg: reduce che...
441
  	unsigned long long excess;
f64c3f549   Balbir Singh   memory controller...
442
443
  	struct mem_cgroup_per_zone *mz;
  	struct mem_cgroup_tree_per_zone *mctz;
4e649152c   KAMEZAWA Hiroyuki   memcg: some modif...
444
445
  	int nid = page_to_nid(page);
  	int zid = page_zonenum(page);
f64c3f549   Balbir Singh   memory controller...
446
447
448
  	mctz = soft_limit_tree_from_page(page);
  
  	/*
4e649152c   KAMEZAWA Hiroyuki   memcg: some modif...
449
450
  	 * Necessary to update all ancestors when hierarchy is used,
  	 * because their event counter is not touched.
f64c3f549   Balbir Singh   memory controller...
451
  	 */
4e649152c   KAMEZAWA Hiroyuki   memcg: some modif...
452
453
  	for (; mem; mem = parent_mem_cgroup(mem)) {
  		mz = mem_cgroup_zoneinfo(mem, nid, zid);
ef8745c1e   KAMEZAWA Hiroyuki   memcg: reduce che...
454
  		excess = res_counter_soft_limit_excess(&mem->res);
4e649152c   KAMEZAWA Hiroyuki   memcg: some modif...
455
456
457
458
  		/*
  		 * We have to update the tree if mz is on RB-tree or
  		 * mem is over its softlimit.
  		 */
ef8745c1e   KAMEZAWA Hiroyuki   memcg: reduce che...
459
  		if (excess || mz->on_tree) {
4e649152c   KAMEZAWA Hiroyuki   memcg: some modif...
460
461
462
463
464
  			spin_lock(&mctz->lock);
  			/* if on-tree, remove it */
  			if (mz->on_tree)
  				__mem_cgroup_remove_exceeded(mem, mz, mctz);
  			/*
ef8745c1e   KAMEZAWA Hiroyuki   memcg: reduce che...
465
466
  			 * Insert again. mz->usage_in_excess will be updated.
  			 * If excess is 0, no tree ops.
4e649152c   KAMEZAWA Hiroyuki   memcg: some modif...
467
  			 */
ef8745c1e   KAMEZAWA Hiroyuki   memcg: reduce che...
468
  			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
4e649152c   KAMEZAWA Hiroyuki   memcg: some modif...
469
470
  			spin_unlock(&mctz->lock);
  		}
f64c3f549   Balbir Singh   memory controller...
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
  	}
  }
  
  static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
  {
  	int node, zone;
  	struct mem_cgroup_per_zone *mz;
  	struct mem_cgroup_tree_per_zone *mctz;
  
  	for_each_node_state(node, N_POSSIBLE) {
  		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  			mz = mem_cgroup_zoneinfo(mem, node, zone);
  			mctz = soft_limit_tree_node_zone(node, zone);
  			mem_cgroup_remove_exceeded(mem, mz, mctz);
  		}
  	}
  }
4e4169535   Balbir Singh   memory controller...
488
489
490
491
  static struct mem_cgroup_per_zone *
  __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  {
  	struct rb_node *rightmost = NULL;
26251eaf9   KAMEZAWA Hiroyuki   memcg: fix refcnt...
492
  	struct mem_cgroup_per_zone *mz;
4e4169535   Balbir Singh   memory controller...
493
494
  
  retry:
26251eaf9   KAMEZAWA Hiroyuki   memcg: fix refcnt...
495
  	mz = NULL;
4e4169535   Balbir Singh   memory controller...
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
  	rightmost = rb_last(&mctz->rb_root);
  	if (!rightmost)
  		goto done;		/* Nothing to reclaim from */
  
  	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
  	/*
  	 * Remove the node now but someone else can add it back;
  	 * we will add it back at the end of reclaim to its correct
  	 * position in the tree.
  	 */
  	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
  	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
  		!css_tryget(&mz->mem->css))
  		goto retry;
  done:
  	return mz;
  }
  
  static struct mem_cgroup_per_zone *
  mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  {
  	struct mem_cgroup_per_zone *mz;
  
  	spin_lock(&mctz->lock);
  	mz = __mem_cgroup_largest_soft_limit_node(mctz);
  	spin_unlock(&mctz->lock);
  	return mz;
  }
711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
  /*
   * Implementation Note: reading percpu statistics for memcg.
   *
   * Both vmstat[] and percpu_counter use a threshold and do periodic
   * synchronization to implement a "quick" read. There is a trade-off between
   * reading cost and precision of the value, so we may have a chance to
   * implement periodic synchronization of the counters in memcg as well.
   *
   * But this _read() function is used for the user interface now. The user
   * accounts memory usage by memory cgroup and _always_ requires an exact
   * value, because he accounts memory. Even if we provided a quick-and-fuzzy
   * read, we would still have to visit all online cpus and sum them up. So,
   * for now, the extra synchronization is not implemented (it is only done
   * for cpu hotplug).
   *
   * If there are kernel-internal users which can make use of a not-exact
   * value, and reading all per-cpu values becomes a performance bottleneck in
   * some common workload, a threshold-and-synchronization scheme like
   * vmstat[] should be implemented.
   */
7a159cc9d   Johannes Weiner   memcg: use native...
543
544
  static long mem_cgroup_read_stat(struct mem_cgroup *mem,
  				 enum mem_cgroup_stat_index idx)
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
545
  {
7a159cc9d   Johannes Weiner   memcg: use native...
546
  	long val = 0;
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
547
  	int cpu;
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
548

711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
549
550
  	get_online_cpus();
  	for_each_online_cpu(cpu)
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
551
  		val += per_cpu(mem->stat->count[idx], cpu);
711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
552
553
554
555
556
557
  #ifdef CONFIG_HOTPLUG_CPU
  	spin_lock(&mem->pcp_counter_lock);
  	val += mem->nocpu_base.count[idx];
  	spin_unlock(&mem->pcp_counter_lock);
  #endif
  	put_online_cpus();
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
558
559
  	return val;
  }
0c3e73e84   Balbir Singh   memcg: improve re...
560
561
562
563
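  /* Bump the per-cpu SWAPOUT statistic by +1 (@charge true) or -1 (@charge false). */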
  static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
  					 bool charge)
  {
  	int val = (charge) ? 1 : -1;
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
564
  	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
0c3e73e84   Balbir Singh   memcg: improve re...
565
  }
456f998ec   Ying Han   memcg: add the pa...
566
567
568
569
570
571
572
573
574
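  /* Per-memcg page-fault event accounting helpers. */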
  void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
  {
  	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
  }
  
  void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
  {
  	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
  }
e9f8974f2   Johannes Weiner   memcg: break out ...
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
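  /* Sum a per-cpu event counter over all online cpus (plus totals saved from offlined cpus). */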
  static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
  					    enum mem_cgroup_events_index idx)
  {
  	unsigned long val = 0;
  	int cpu;
  
  	for_each_online_cpu(cpu)
  		val += per_cpu(mem->stat->events[idx], cpu);
  #ifdef CONFIG_HOTPLUG_CPU
  	spin_lock(&mem->pcp_counter_lock);
  	val += mem->nocpu_base.events[idx];
  	spin_unlock(&mem->pcp_counter_lock);
  #endif
  	return val;
  }
c05555b57   KAMEZAWA Hiroyuki   memcg: atomic ops...
590
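  /*
   * Account @nr_pages being charged (positive) or uncharged (negative) to @mem
   * and update the event counters consumed by memcg_check_events().
   */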
  static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
e401f1761   KAMEZAWA Hiroyuki   memcg: modify acc...
591
  					 bool file, int nr_pages)
d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
592
  {
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
593
  	preempt_disable();
e401f1761   KAMEZAWA Hiroyuki   memcg: modify acc...
594
595
  	if (file)
  		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
596
  	else
e401f1761   KAMEZAWA Hiroyuki   memcg: modify acc...
597
  		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
55e462b05   Balaji Rao   memcg: simple sta...
598

e401f1761   KAMEZAWA Hiroyuki   memcg: modify acc...
599
600
  	/* pagein of a big page is an event. So, ignore page size */
  	if (nr_pages > 0)
e9f8974f2   Johannes Weiner   memcg: break out ...
601
  		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
3751d6043   KAMEZAWA Hiroyuki   memcg: fix event ...
602
  	else {
e9f8974f2   Johannes Weiner   memcg: break out ...
603
  		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
3751d6043   KAMEZAWA Hiroyuki   memcg: fix event ...
604
605
  		nr_pages = -nr_pages; /* for event */
  	}
e401f1761   KAMEZAWA Hiroyuki   memcg: modify acc...
606

e9f8974f2   Johannes Weiner   memcg: break out ...
607
  	__this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
2e72b6347   Kirill A. Shutemov   memcg: implement ...
608

c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
609
  	preempt_enable();
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
610
  }
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
611
612
613
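  /* Number of pages on the LRU lists selected by @lru_mask in one zone of @mem. */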
  unsigned long
  mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid,
  			unsigned int lru_mask)
889976dbc   Ying Han   memcg: reclaim me...
614
615
  {
  	struct mem_cgroup_per_zone *mz;
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
  	enum lru_list l;
  	unsigned long ret = 0;
  
  	mz = mem_cgroup_zoneinfo(mem, nid, zid);
  
  	for_each_lru(l) {
  		if (BIT(l) & lru_mask)
  			ret += MEM_CGROUP_ZSTAT(mz, l);
  	}
  	return ret;
  }
  
  static unsigned long
  mem_cgroup_node_nr_lru_pages(struct mem_cgroup *mem,
  			int nid, unsigned int lru_mask)
  {
889976dbc   Ying Han   memcg: reclaim me...
632
633
  	u64 total = 0;
  	int zid;
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
634
635
  	for (zid = 0; zid < MAX_NR_ZONES; zid++)
  		total += mem_cgroup_zone_nr_lru_pages(mem, nid, zid, lru_mask);
889976dbc   Ying Han   memcg: reclaim me...
636
637
  	return total;
  }
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
638
639
640
  
  static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *mem,
  			unsigned int lru_mask)
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
641
  {
889976dbc   Ying Han   memcg: reclaim me...
642
  	int nid;
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
643
  	u64 total = 0;
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
644
645
  	for_each_node_state(nid, N_HIGH_MEMORY)
  		total += mem_cgroup_node_nr_lru_pages(mem, nid, lru_mask);
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
646
  	return total;
d52aa412d   KAMEZAWA Hiroyuki   memory cgroup enh...
647
  }
7a159cc9d   Johannes Weiner   memcg: use native...
648
649
650
651
652
653
654
655
656
657
658
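  /* Has the event counter passed the stored target for @target? */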
  static bool __memcg_event_check(struct mem_cgroup *mem, int target)
  {
  	unsigned long val, next;
  
  	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
  	next = this_cpu_read(mem->stat->targets[target]);
  	/* from time_after() in jiffies.h */
  	return ((long)next - (long)val < 0);
  }
  
  static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
d2265e6fa   KAMEZAWA Hiroyuki   memcg : share eve...
659
  {
7a159cc9d   Johannes Weiner   memcg: use native...
660
  	unsigned long val, next;
d2265e6fa   KAMEZAWA Hiroyuki   memcg : share eve...
661

e9f8974f2   Johannes Weiner   memcg: break out ...
662
  	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
d2265e6fa   KAMEZAWA Hiroyuki   memcg : share eve...
663

7a159cc9d   Johannes Weiner   memcg: use native...
664
665
666
667
668
669
670
  	switch (target) {
  	case MEM_CGROUP_TARGET_THRESH:
  		next = val + THRESHOLDS_EVENTS_TARGET;
  		break;
  	case MEM_CGROUP_TARGET_SOFTLIMIT:
  		next = val + SOFTLIMIT_EVENTS_TARGET;
  		break;
453a9bf34   KAMEZAWA Hiroyuki   memcg: fix numa s...
671
672
673
  	case MEM_CGROUP_TARGET_NUMAINFO:
  		next = val + NUMAINFO_EVENTS_TARGET;
  		break;
7a159cc9d   Johannes Weiner   memcg: use native...
674
675
676
677
678
  	default:
  		return;
  	}
  
  	this_cpu_write(mem->stat->targets[target], next);
d2265e6fa   KAMEZAWA Hiroyuki   memcg : share eve...
679
680
681
682
683
684
685
686
687
  }
  
  /*
   * Check events in order.
   *
   */
  static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
  {
  	/* threshold event is triggered in finer grain than soft limit */
7a159cc9d   Johannes Weiner   memcg: use native...
688
  	if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
d2265e6fa   KAMEZAWA Hiroyuki   memcg : share eve...
689
  		mem_cgroup_threshold(mem);
7a159cc9d   Johannes Weiner   memcg: use native...
690
691
  		__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
  		if (unlikely(__memcg_event_check(mem,
453a9bf34   KAMEZAWA Hiroyuki   memcg: fix numa s...
692
  			     MEM_CGROUP_TARGET_SOFTLIMIT))) {
d2265e6fa   KAMEZAWA Hiroyuki   memcg : share eve...
693
  			mem_cgroup_update_tree(mem, page);
7a159cc9d   Johannes Weiner   memcg: use native...
694
  			__mem_cgroup_target_update(mem,
453a9bf34   KAMEZAWA Hiroyuki   memcg: fix numa s...
695
696
697
698
699
700
701
702
  						   MEM_CGROUP_TARGET_SOFTLIMIT);
  		}
  #if MAX_NUMNODES > 1
  		if (unlikely(__memcg_event_check(mem,
  			MEM_CGROUP_TARGET_NUMAINFO))) {
  			atomic_inc(&mem->numainfo_events);
  			__mem_cgroup_target_update(mem,
  				MEM_CGROUP_TARGET_NUMAINFO);
7a159cc9d   Johannes Weiner   memcg: use native...
703
  		}
453a9bf34   KAMEZAWA Hiroyuki   memcg: fix numa s...
704
  #endif
d2265e6fa   KAMEZAWA Hiroyuki   memcg : share eve...
705
706
  	}
  }
d5b69e38f   Hugh Dickins   memcg: memcontrol...
707
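  /* Convert a cgroup to the mem_cgroup embedded in its subsys state. */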
  static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
8cdea7c05   Balbir Singh   Memory controller...
708
709
710
711
712
  {
  	return container_of(cgroup_subsys_state(cont,
  				mem_cgroup_subsys_id), struct mem_cgroup,
  				css);
  }
cf475ad28   Balbir Singh   cgroups: add an o...
713
  struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
78fb74669   Pavel Emelianov   Memory controller...
714
  {
31a78f23b   Balbir Singh   mm owner: fix rac...
715
716
717
718
719
720
721
  	/*
  	 * mm_update_next_owner() may clear mm->owner to NULL
  	 * if it races with swapoff, page migration, etc.
  	 * So this can be called with p == NULL.
  	 */
  	if (unlikely(!p))
  		return NULL;
78fb74669   Pavel Emelianov   Memory controller...
722
723
724
  	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
  				struct mem_cgroup, css);
  }
a433658c3   KOSAKI Motohiro   vmscan,memcg: mem...
725
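  /*
   * Look up the memcg that @mm's owner belongs to and take a css reference
   * on it; may return NULL (e.g. when @mm is NULL).
   */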
  struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
54595fe26   KAMEZAWA Hiroyuki   memcg: use css_tr...
726
727
  {
  	struct mem_cgroup *mem = NULL;
0b7f569e4   KAMEZAWA Hiroyuki   memcg: fix OOM ki...
728
729
730
  
  	if (!mm)
  		return NULL;
54595fe26   KAMEZAWA Hiroyuki   memcg: use css_tr...
731
732
733
734
735
736
737
738
739
740
741
742
743
744
  	/*
  	 * Because we have no locks, mm->owner may be moved to another
  	 * cgroup. We use css_tryget() here even if this looks
  	 * pessimistic (rather than adding locks here).
  	 */
  	rcu_read_lock();
  	do {
  		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
  		if (unlikely(!mem))
  			break;
  	} while (!css_tryget(&mem->css));
  	rcu_read_unlock();
  	return mem;
  }
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
745
746
  /* The caller has to guarantee "mem" exists before calling this */
  static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
747
  {
711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
  	struct cgroup_subsys_state *css;
  	int found;
  
  	if (!mem) /* ROOT cgroup has the smallest ID */
  		return root_mem_cgroup; /*css_put/get against root is ignored*/
  	if (!mem->use_hierarchy) {
  		if (css_tryget(&mem->css))
  			return mem;
  		return NULL;
  	}
  	rcu_read_lock();
  	/*
  	 * search for the memory cgroup which has the smallest ID under the
  	 * given ROOT cgroup (ID >= 1)
  	 */
  	css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
  	if (css && css_tryget(css))
  		mem = container_of(css, struct mem_cgroup, css);
  	else
  		mem = NULL;
  	rcu_read_unlock();
  	return mem;
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
770
771
772
773
774
775
776
777
778
  }
  
  static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
  					struct mem_cgroup *root,
  					bool cond)
  {
  	int nextid = css_id(&iter->css) + 1;
  	int found;
  	int hierarchy_used;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
779
  	struct cgroup_subsys_state *css;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
780

7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
781
  	hierarchy_used = iter->use_hierarchy;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
782

7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
783
  	css_put(&iter->css);
711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
784
785
  	/* If no ROOT, walk all, ignore hierarchy */
  	if (!cond || (root && !hierarchy_used))
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
786
  		return NULL;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
787

711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
788
789
  	if (!root)
  		root = root_mem_cgroup;
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
790
791
  	do {
  		iter = NULL;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
792
  		rcu_read_lock();
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
793
794
795
  
  		css = css_get_next(&mem_cgroup_subsys, nextid,
  				&root->css, &found);
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
796
  		if (css && css_tryget(css))
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
797
  			iter = container_of(css, struct mem_cgroup, css);
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
798
  		rcu_read_unlock();
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
799
  		/* If css is NULL, no more cgroups will be found */
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
800
  		nextid = found + 1;
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
801
  	} while (css && !iter);
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
802

7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
803
  	return iter;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
804
  }
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
805
806
807
808
809
810
811
812
813
814
815
816
  /*
   * for_each_mem_cgroup_tree() is for visiting all cgroups under a tree. Please
   * be careful that breaking out of the loop is not allowed, because we hold a
   * reference count. Instead, set "cond" to false and "continue" to exit the loop.
   */
  #define for_each_mem_cgroup_tree_cond(iter, root, cond)	\
  	for (iter = mem_cgroup_start_loop(root);\
  	     iter != NULL;\
  	     iter = mem_cgroup_get_next(iter, root, cond))
  
  #define for_each_mem_cgroup_tree(iter, root) \
  	for_each_mem_cgroup_tree_cond(iter, root, true)
711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
817
818
  #define for_each_mem_cgroup_all(iter) \
  	for_each_mem_cgroup_tree_cond(iter, NULL, true)
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
819

4b3bde4c9   Balbir Singh   memcg: remove the...
820
821
822
823
  static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
  {
  	return (mem == root_mem_cgroup);
  }
456f998ec   Ying Han   memcg: add the pa...
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
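  /* Account a PGFAULT/PGMAJFAULT vm event to the memcg of @mm's owner. */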
  void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
  {
  	struct mem_cgroup *mem;
  
  	if (!mm)
  		return;
  
  	rcu_read_lock();
  	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
  	if (unlikely(!mem))
  		goto out;
  
  	switch (idx) {
  	case PGMAJFAULT:
  		mem_cgroup_pgmajfault(mem, 1);
  		break;
  	case PGFAULT:
  		mem_cgroup_pgfault(mem, 1);
  		break;
  	default:
  		BUG();
  	}
  out:
  	rcu_read_unlock();
  }
  EXPORT_SYMBOL(mem_cgroup_count_vm_event);
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
850
851
852
853
854
855
856
857
858
859
860
861
862
  /*
   * The following LRU functions are allowed to be used without PCG_LOCK.
   * Operations are called by the global LRU routines independently of memcg.
   * What we have to take care of here is the validity of pc->mem_cgroup.
   *
   * Changes to pc->mem_cgroup happen when
   * 1. charging
   * 2. moving the account
   * In the typical case, "charge" is done before add-to-lru. The exception is
   * SwapCache, which is added to the LRU before being charged.
   * If the PCG_USED bit is not set, page_cgroup is not added to this private LRU.
   * When moving the account, the page is not on the LRU; it's isolated.
   */
4f98a2fee   Rik van Riel   vmscan: split LRU...
863

08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
864
865
866
  void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
  {
  	struct page_cgroup *pc;
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
867
  	struct mem_cgroup_per_zone *mz;
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
868

f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
869
  	if (mem_cgroup_disabled())
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
870
871
872
  		return;
  	pc = lookup_page_cgroup(page);
  	/* can happen while we handle swapcache. */
4b3bde4c9   Balbir Singh   memcg: remove the...
873
  	if (!TestClearPageCgroupAcctLRU(pc))
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
874
  		return;
4b3bde4c9   Balbir Singh   memcg: remove the...
875
  	VM_BUG_ON(!pc->mem_cgroup);
544122e5e   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
876
877
878
879
  	/*
  	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
  	 * removed from global LRU.
  	 */
97a6c37b3   Johannes Weiner   memcg: change pag...
880
  	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
ece35ca81   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
881
882
  	/* huge page split is done under lru_lock. so, we have no races. */
  	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
4b3bde4c9   Balbir Singh   memcg: remove the...
883
884
885
  	if (mem_cgroup_is_root(pc->mem_cgroup))
  		return;
  	VM_BUG_ON(list_empty(&pc->lru));
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
886
  	list_del_init(&pc->lru);
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
887
  }
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
888
  void mem_cgroup_del_lru(struct page *page)
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
889
  {
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
890
891
  	mem_cgroup_del_lru_list(page, page_lru(page));
  }
b69408e88   Christoph Lameter   vmscan: Use an in...
892

3f58a8294   Minchan Kim   memcg: move memcg...
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
  /*
   * Writeback is about to end against a page which has been marked for immediate
   * reclaim.  If it still appears to be reclaimable, move it to the tail of the
   * inactive list.
   */
  void mem_cgroup_rotate_reclaimable_page(struct page *page)
  {
  	struct mem_cgroup_per_zone *mz;
  	struct page_cgroup *pc;
  	enum lru_list lru = page_lru(page);
  
  	if (mem_cgroup_disabled())
  		return;
  
  	pc = lookup_page_cgroup(page);
  	/* unused or root page is not rotated. */
  	if (!PageCgroupUsed(pc))
  		return;
  	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
  	smp_rmb();
  	if (mem_cgroup_is_root(pc->mem_cgroup))
  		return;
97a6c37b3   Johannes Weiner   memcg: change pag...
915
  	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
3f58a8294   Minchan Kim   memcg: move memcg...
916
917
  	list_move_tail(&pc->lru, &mz->lists[lru]);
  }
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
918
919
920
921
  void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
  {
  	struct mem_cgroup_per_zone *mz;
  	struct page_cgroup *pc;
b69408e88   Christoph Lameter   vmscan: Use an in...
922

f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
923
  	if (mem_cgroup_disabled())
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
924
  		return;
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
925

08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
926
  	pc = lookup_page_cgroup(page);
4b3bde4c9   Balbir Singh   memcg: remove the...
927
  	/* unused or root page is not rotated. */
713735b42   Johannes Weiner   memcg: correctly ...
928
929
930
931
932
  	if (!PageCgroupUsed(pc))
  		return;
  	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
  	smp_rmb();
  	if (mem_cgroup_is_root(pc->mem_cgroup))
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
933
  		return;
97a6c37b3   Johannes Weiner   memcg: change pag...
934
  	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
935
  	list_move(&pc->lru, &mz->lists[lru]);
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
936
  }
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
937
  void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
66e1707bc   Balbir Singh   Memory controller...
938
  {
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
939
940
  	struct page_cgroup *pc;
  	struct mem_cgroup_per_zone *mz;
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
941

f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
942
  	if (mem_cgroup_disabled())
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
943
944
  		return;
  	pc = lookup_page_cgroup(page);
4b3bde4c9   Balbir Singh   memcg: remove the...
945
  	VM_BUG_ON(PageCgroupAcctLRU(pc));
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
946
  	if (!PageCgroupUsed(pc))
894bc3104   Lee Schermerhorn   Unevictable LRU I...
947
  		return;
713735b42   Johannes Weiner   memcg: correctly ...
948
949
  	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
  	smp_rmb();
97a6c37b3   Johannes Weiner   memcg: change pag...
950
  	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
ece35ca81   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
951
952
  	/* huge page split is done under lru_lock. so, we have no races. */
  	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
4b3bde4c9   Balbir Singh   memcg: remove the...
953
954
955
  	SetPageCgroupAcctLRU(pc);
  	if (mem_cgroup_is_root(pc->mem_cgroup))
  		return;
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
956
957
  	list_add(&pc->lru, &mz->lists[lru]);
  }
544122e5e   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
958

08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
959
  /*
5a6475a4e   KAMEZAWA Hiroyuki   memcg: fix leak o...
960
961
962
963
   * When handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
   * while it's linked to an LRU because the page may be reused after it's fully
   * uncharged. To handle that, unlink page_cgroup from the LRU when charging it
   * again. This is done under lock_page and it is expected that zone->lru_lock
   * is never held.
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
964
   */
5a6475a4e   KAMEZAWA Hiroyuki   memcg: fix leak o...
965
  static void mem_cgroup_lru_del_before_commit(struct page *page)
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
966
  {
544122e5e   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
967
968
969
  	unsigned long flags;
  	struct zone *zone = page_zone(page);
  	struct page_cgroup *pc = lookup_page_cgroup(page);
5a6475a4e   KAMEZAWA Hiroyuki   memcg: fix leak o...
970
971
972
973
974
975
976
977
978
979
  	/*
  	 * Doing this check without taking ->lru_lock seems wrong, but it is
  	 * safe: if page_cgroup's USED bit is unset, the page will not be added
  	 * to any memcg's LRU, and if page_cgroup's USED bit is set, the commit
  	 * after this will fail anyway.
  	 * All of this charge/uncharge is done under mutual exclusion, so we
  	 * don't need to take care of changes to the USED bit.
  	 */
  	if (likely(!PageLRU(page)))
  		return;
544122e5e   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
980
981
982
983
984
985
986
987
  	spin_lock_irqsave(&zone->lru_lock, flags);
  	/*
  	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
  	 * is guarded by lock_page() because the page is SwapCache.
  	 */
  	if (!PageCgroupUsed(pc))
  		mem_cgroup_del_lru_list(page, page_lru(page));
  	spin_unlock_irqrestore(&zone->lru_lock, flags);
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
988
  }
5a6475a4e   KAMEZAWA Hiroyuki   memcg: fix leak o...
989
  static void mem_cgroup_lru_add_after_commit(struct page *page)
544122e5e   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
990
991
992
993
  {
  	unsigned long flags;
  	struct zone *zone = page_zone(page);
  	struct page_cgroup *pc = lookup_page_cgroup(page);
5a6475a4e   KAMEZAWA Hiroyuki   memcg: fix leak o...
994
995
996
  	/* take care of the case where the page is added to the LRU while we commit it */
  	if (likely(!PageLRU(page)))
  		return;
544122e5e   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
997
998
  	spin_lock_irqsave(&zone->lru_lock, flags);
  	/* link when the page is linked to LRU but page_cgroup isn't */
4b3bde4c9   Balbir Singh   memcg: remove the...
999
  	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
544122e5e   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
1000
1001
1002
  		mem_cgroup_add_lru_list(page, page_lru(page));
  	spin_unlock_irqrestore(&zone->lru_lock, flags);
  }
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
1003
1004
1005
  void mem_cgroup_move_lists(struct page *page,
  			   enum lru_list from, enum lru_list to)
  {
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
1006
  	if (mem_cgroup_disabled())
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
1007
1008
1009
  		return;
  	mem_cgroup_del_lru_list(page, from);
  	mem_cgroup_add_lru_list(page, to);
66e1707bc   Balbir Singh   Memory controller...
1010
  }
3e92041d6   Michal Hocko   memcg: add mem_cg...
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
  /*
   * Checks whether the given mem is the same as root_mem or lies within
   * root_mem's hierarchy subtree.
   */
  static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_mem,
  		struct mem_cgroup *mem)
  {
  	if (root_mem != mem) {
  		return (root_mem->use_hierarchy &&
  			css_is_ancestor(&mem->css, &root_mem->css));
  	}
  
  	return true;
  }
4c4a22148   David Rientjes   memcontrol: move ...
1025
1026
1027
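  /*
   * Returns non-zero if @task's memcg is @mem itself or lies within @mem's
   * hierarchy subtree.
   */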
  int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
  {
  	int ret;
0b7f569e4   KAMEZAWA Hiroyuki   memcg: fix OOM ki...
1028
  	struct mem_cgroup *curr = NULL;
158e0a2d1   KAMEZAWA Hiroyuki   memcg: use find_l...
1029
  	struct task_struct *p;
4c4a22148   David Rientjes   memcontrol: move ...
1030

158e0a2d1   KAMEZAWA Hiroyuki   memcg: use find_l...
1031
1032
1033
1034
1035
  	p = find_lock_task_mm(task);
  	if (!p)
  		return 0;
  	curr = try_get_mem_cgroup_from_mm(p->mm);
  	task_unlock(p);
0b7f569e4   KAMEZAWA Hiroyuki   memcg: fix OOM ki...
1036
1037
  	if (!curr)
  		return 0;
d31f56dbf   Daisuke Nishimura   memcg: avoid oom-...
1038
1039
1040
1041
1042
1043
  	/*
  	 * We should check use_hierarchy of "mem", not "curr": checking
  	 * use_hierarchy of "curr" here would make this function return true if
  	 * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
  	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
  	 */
3e92041d6   Michal Hocko   memcg: add mem_cg...
1044
  	ret = mem_cgroup_same_or_subtree(mem, curr);
0b7f569e4   KAMEZAWA Hiroyuki   memcg: fix OOM ki...
1045
  	css_put(&curr->css);
4c4a22148   David Rientjes   memcontrol: move ...
1046
1047
  	return ret;
  }
c772be939   KOSAKI Motohiro   memcg: fix calcul...
1048
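  /*
   * Target inactive:active ratio for the anon LRU: int_sqrt(10 * size-in-GB),
   * minimum 1. Optionally reports the current counts via @present_pages.
   */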
  static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
14797e236   KOSAKI Motohiro   memcg: add inacti...
1049
1050
1051
  {
  	unsigned long active;
  	unsigned long inactive;
c772be939   KOSAKI Motohiro   memcg: fix calcul...
1052
1053
  	unsigned long gb;
  	unsigned long inactive_ratio;
14797e236   KOSAKI Motohiro   memcg: add inacti...
1054

bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
1055
1056
  	inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
  	active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
14797e236   KOSAKI Motohiro   memcg: add inacti...
1057

  	gb = (inactive + active) >> (30 - PAGE_SHIFT);
  	if (gb)
  		inactive_ratio = int_sqrt(10 * gb);
  	else
  		inactive_ratio = 1;
  
  	if (present_pages) {
  		present_pages[0] = inactive;
  		present_pages[1] = active;
  	}
  
  	return inactive_ratio;
  }
  
  int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
  {
  	unsigned long active;
  	unsigned long inactive;
  	unsigned long present_pages[2];
  	unsigned long inactive_ratio;
  
  	inactive_ratio = calc_inactive_ratio(memcg, present_pages);
  
  	inactive = present_pages[0];
  	active = present_pages[1];
  
  	if (inactive * inactive_ratio < active)
  		return 1;
  
  	return 0;
  }
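
  /*
   * Worked example for the ratio above: 1GB of anon pages gives
   * int_sqrt(10 * 1) = 3, 4GB gives int_sqrt(40) = 6 and 10GB gives
   * int_sqrt(100) = 10. Inactive anon is reported as "low" (so that reclaim
   * deactivates more anon pages) while inactive * inactive_ratio < active.
   */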
  int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
  {
  	unsigned long active;
  	unsigned long inactive;
  	inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
  	active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
  
  	return (active > inactive);
  }
  struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
  						      struct zone *zone)
  {
  	int nid = zone_to_nid(zone);
  	int zid = zone_idx(zone);
  	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
  
  	return &mz->reclaim_stat;
  }
  
  struct zone_reclaim_stat *
  mem_cgroup_get_reclaim_stat_from_page(struct page *page)
  {
  	struct page_cgroup *pc;
  	struct mem_cgroup_per_zone *mz;
  
  	if (mem_cgroup_disabled())
  		return NULL;
  
  	pc = lookup_page_cgroup(page);
  	if (!PageCgroupUsed(pc))
  		return NULL;
  	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
  	smp_rmb();
  	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
  	return &mz->reclaim_stat;
  }
  unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
  					struct list_head *dst,
  					unsigned long *scanned, int order,
  					int mode, struct zone *z,
  					struct mem_cgroup *mem_cont,
  					int active, int file)
  {
  	unsigned long nr_taken = 0;
  	struct page *page;
  	unsigned long scan;
  	LIST_HEAD(pc_list);
  	struct list_head *src;
  	struct page_cgroup *pc, *tmp;
  	int nid = zone_to_nid(z);
  	int zid = zone_idx(z);
  	struct mem_cgroup_per_zone *mz;
  	int lru = LRU_FILE * file + active;
  	int ret;

  	BUG_ON(!mem_cont);
  	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
  	src = &mz->lists[lru];

  	scan = 0;
  	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
  		if (scan >= nr_to_scan)
  			break;

  		if (unlikely(!PageCgroupUsed(pc)))
  			continue;

  		page = lookup_cgroup_page(pc);

  		if (unlikely(!PageLRU(page)))
  			continue;

  		scan++;
  		ret = __isolate_lru_page(page, mode, file);
  		switch (ret) {
  		case 0:
  			list_move(&page->lru, dst);
  			mem_cgroup_del_lru(page);
  			nr_taken += hpage_nr_pages(page);
  			break;
  		case -EBUSY:
  			/* we don't affect global LRU but rotate in our LRU */
  			mem_cgroup_rotate_lru_list(page, page_lru(page));
  			break;
  		default:
  			break;
  		}
  	}
  	*scanned = scan;
  
  	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
  				      0, 0, 0, mode);
  	return nr_taken;
  }
  #define mem_cgroup_from_res_counter(counter, member)	\
  	container_of(counter, struct mem_cgroup, member)
  /**
   * mem_cgroup_margin - calculate chargeable space of a memory cgroup
   * @mem: the memory cgroup
   *
   * Returns the maximum amount of memory @mem can be charged with, in
   * pages.
   */
  static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
  {
  	unsigned long long margin;
  
  	margin = res_counter_margin(&mem->res);
  	if (do_swap_account)
  		margin = min(margin, res_counter_margin(&mem->memsw));
  	return margin >> PAGE_SHIFT;
  }
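
  /*
   * Example for mem_cgroup_margin(): with a 512MB limit and 448MB of usage
   * (and no tighter mem+swap limit), the margin is 64MB, i.e. 16384 pages
   * assuming 4KB pages.
   */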
  int mem_cgroup_swappiness(struct mem_cgroup *memcg)
  {
  	struct cgroup *cgrp = memcg->css.cgroup;
  
  	/* root ? */
  	if (cgrp->parent == NULL)
  		return vm_swappiness;
  	return memcg->swappiness;
  }
  static void mem_cgroup_start_move(struct mem_cgroup *mem)
  {
  	int cpu;
  
  	get_online_cpus();
  	spin_lock(&mem->pcp_counter_lock);
  	for_each_online_cpu(cpu)
  		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
  	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
  	spin_unlock(&mem->pcp_counter_lock);
  	put_online_cpus();
  
  	synchronize_rcu();
  }
  
  static void mem_cgroup_end_move(struct mem_cgroup *mem)
  {
  	int cpu;
  
  	if (!mem)
  		return;
  	get_online_cpus();
  	spin_lock(&mem->pcp_counter_lock);
  	for_each_online_cpu(cpu)
  		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
  	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
  	spin_unlock(&mem->pcp_counter_lock);
  	put_online_cpus();
  }
  /*
   * 2 routines for checking whether "mem" is under move_account() or not.
   *
   * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This
   *			  is used to avoid races in accounting. If true,
   *			  pc->mem_cgroup may be overwritten.
   *
   * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to, or
   *			  under the hierarchy of moving cgroups. This is used
   *			  for waiting at high memory pressure caused by "move".
   */
  
  static bool mem_cgroup_stealed(struct mem_cgroup *mem)
  {
  	VM_BUG_ON(!rcu_read_lock_held());
  	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
  }
  
  static bool mem_cgroup_under_move(struct mem_cgroup *mem)
  {
  	struct mem_cgroup *from;
  	struct mem_cgroup *to;
  	bool ret = false;
  	/*
  	 * Unlike task_move routines, we access mc.to, mc.from not under
  	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
  	 */
  	spin_lock(&mc.lock);
  	from = mc.from;
  	to = mc.to;
  	if (!from)
  		goto unlock;
  
  	ret = mem_cgroup_same_or_subtree(mem, from)
  		|| mem_cgroup_same_or_subtree(mem, to);
  unlock:
  	spin_unlock(&mc.lock);
  	return ret;
  }
  
  static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
  {
  	if (mc.moving_task && current != mc.moving_task) {
  		if (mem_cgroup_under_move(mem)) {
  			DEFINE_WAIT(wait);
  			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
  			/* moving charge context might have finished. */
  			if (mc.moving_task)
  				schedule();
  			finish_wait(&mc.waitq, &wait);
  			return true;
  		}
  	}
  	return false;
  }
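
  /*
   * A charger that races with move_account() simply sleeps on mc.waitq until
   * the move finishes (or the moving task goes away) and then retries, so
   * charges are not accounted twice.
   */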
  /**
   * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
   * @memcg: The memory cgroup that went over limit
   * @p: Task that is going to be killed
   *
   * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
   * enabled
   */
  void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
  {
  	struct cgroup *task_cgrp;
  	struct cgroup *mem_cgrp;
  	/*
  	 * Need a buffer in BSS, can't rely on allocations. The code relies
  	 * on the assumption that OOM is serialized for memory controller.
  	 * If this assumption is broken, revisit this code.
  	 */
  	static char memcg_name[PATH_MAX];
  	int ret;
  	if (!memcg || !p)
  		return;
  
  
  	rcu_read_lock();
  
  	mem_cgrp = memcg->css.cgroup;
  	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
  
  	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
  	if (ret < 0) {
  		/*
  		 * Unfortunately, we are unable to convert to a useful name
  		 * But we'll still print out the usage information
  		 */
  		rcu_read_unlock();
  		goto done;
  	}
  	rcu_read_unlock();
  
  	printk(KERN_INFO "Task in %s killed", memcg_name);
  
  	rcu_read_lock();
  	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
  	if (ret < 0) {
  		rcu_read_unlock();
  		goto done;
  	}
  	rcu_read_unlock();
  
  	/*
  	 * Continues from above, so we don't need a KERN_ level
  	 */
  	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
  done:
  
  	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
  		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
  		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
  		res_counter_read_u64(&memcg->res, RES_FAILCNT));
  	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
  		"failcnt %llu\n",
  		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
  		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
  		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
  }
  /*
   * This function returns the number of memcgs under the hierarchy tree.
   * Returns 1 (the self count) if there are no children.
   */
  static int mem_cgroup_count_children(struct mem_cgroup *mem)
  {
  	int num = 0;
  	struct mem_cgroup *iter;
  
  	for_each_mem_cgroup_tree(iter, mem)
  		num++;
  	return num;
  }
  /*
   * Return the memory (and swap, if configured) limit for a memcg.
   */
  u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
  {
  	u64 limit;
  	u64 memsw;
  	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  	limit += total_swap_pages << PAGE_SHIFT;
  	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  	/*
  	 * If memsw is finite and limits the amount of swap space available
  	 * to this memcg, return that limit.
  	 */
  	return min(limit, memsw);
  }
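
  /*
   * Example for mem_cgroup_get_limit(): with a 1G memory limit, a 1.5G
   * mem+swap limit and 8G of swap configured, this returns
   * min(1G + 8G, 1.5G) = 1.5G.
   */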
  
  /*
   * Visit the first child (need not be the first child as per the ordering
   * of the cgroup list, since we track last_scanned_child) of @mem and use
   * that to reclaim free pages from.
   */
  static struct mem_cgroup *
  mem_cgroup_select_victim(struct mem_cgroup *root_mem)
  {
  	struct mem_cgroup *ret = NULL;
  	struct cgroup_subsys_state *css;
  	int nextid, found;
  
  	if (!root_mem->use_hierarchy) {
  		css_get(&root_mem->css);
  		ret = root_mem;
  	}
  
  	while (!ret) {
  		rcu_read_lock();
  		nextid = root_mem->last_scanned_child + 1;
  		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
  				   &found);
  		if (css && css_tryget(css))
  			ret = container_of(css, struct mem_cgroup, css);
  
  		rcu_read_unlock();
  		/* Updates scanning parameter */
  		if (!css) {
  			/* this means start scan from ID:1 */
  			root_mem->last_scanned_child = 0;
  		} else
  			root_mem->last_scanned_child = found;
  	}
  
  	return ret;
  }
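
  /*
   * The victim walk above is a simple round-robin over css IDs: each call
   * resumes at last_scanned_child + 1 and wraps back to the beginning (by
   * resetting last_scanned_child to 0) once css_get_next() finds nothing.
   */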
  /**
   * test_mem_cgroup_node_reclaimable
   * @mem: the target memcg
   * @nid: the node ID to be checked.
   * @noswap : specify true here if the user wants file only information.
   *
   * This function returns whether the specified memcg contains any
   * reclaimable pages on a node. Returns true if there are any reclaimable
   * pages in the node.
   */
  static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
  		int nid, bool noswap)
  {
  	if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_FILE))
  		return true;
  	if (noswap || !total_swap_pages)
  		return false;
  	if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_ANON))
  		return true;
  	return false;
  
  }
  #if MAX_NUMNODES > 1
  
  /*
   * Always updating the nodemask is not very good - even if we have an empty
   * list or the wrong list here, we can start from some node and traverse all
   * nodes based on the zonelist. So update the list loosely once per 10 secs.
   *
   */
  static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
  {
  	int nid;
  	/*
  	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
  	 * pagein/pageout changes since the last update.
  	 */
  	if (!atomic_read(&mem->numainfo_events))
  		return;
  	if (atomic_inc_return(&mem->numainfo_updating) > 1)
  		return;
  	/* make a nodemask where this memcg uses memory from */
  	mem->scan_nodes = node_states[N_HIGH_MEMORY];
  
  	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
  		if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
  			node_clear(nid, mem->scan_nodes);
  	}
  
  	atomic_set(&mem->numainfo_events, 0);
  	atomic_set(&mem->numainfo_updating, 0);
  }
  
  /*
   * Select a node to start reclaim from. Because all we need is to reduce the
   * usage counter, starting from anywhere is OK. Considering memory reclaim
   * from the current node, there are pros and cons:
   *
   * Freeing memory from the current node means freeing memory from a node
   * which we'll use or have used. So, it may make the LRU bad. And if several
   * threads hit their limits, they will contend on that node. But freeing from
   * a remote node means more costs for memory reclaim because of memory
   * latency.
   *
   * Now, we use round-robin. A better algorithm is welcome.
   */
  int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
  {
  	int node;
  
  	mem_cgroup_may_update_nodemask(mem);
  	node = mem->last_scanned_node;
  
  	node = next_node(node, mem->scan_nodes);
  	if (node == MAX_NUMNODES)
  		node = first_node(mem->scan_nodes);
  	/*
  	 * We call this when we hit limit, not when pages are added to LRU.
  	 * No LRU may hold pages because all pages are UNEVICTABLE or the
  	 * memcg is too small and no pages are on any LRU. In that case,
  	 * we use the current node.
  	 */
  	if (unlikely(node == MAX_NUMNODES))
  		node = numa_node_id();
  
  	mem->last_scanned_node = node;
  	return node;
  }
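
  /*
   * Example: with scan_nodes = {0, 2} and last_scanned_node = 0, the first
   * call above picks node 2, the next one wraps around to node 0, and so on.
   */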
  /*
   * Check all nodes for whether they contain reclaimable pages or not.
   * For a quick scan, we make use of scan_nodes. This allows us to skip
   * unused nodes. But scan_nodes is lazily updated and may not contain
   * enough up-to-date information, so we double check below.
   */
  bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
  {
  	int nid;
  
  	/*
  	 * quick check...making use of scan_node.
  	 * We can skip unused nodes.
  	 */
  	if (!nodes_empty(mem->scan_nodes)) {
  		for (nid = first_node(mem->scan_nodes);
  		     nid < MAX_NUMNODES;
  		     nid = next_node(nid, mem->scan_nodes)) {
  
  			if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
  				return true;
  		}
  	}
  	/*
  	 * Check rest of nodes.
  	 */
  	for_each_node_state(nid, N_HIGH_MEMORY) {
  		if (node_isset(nid, mem->scan_nodes))
  			continue;
  		if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
  			return true;
  	}
  	return false;
  }
  #else
  int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
  {
  	return 0;
  }
  
  bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
  {
  	return test_mem_cgroup_node_reclaimable(mem, 0, noswap);
  }
  #endif
  /*
   * Scan the hierarchy if needed to reclaim memory. We remember the last child
   * we reclaimed from, so that we don't end up penalizing one child extensively
   * based on its position in the children list.
   *
   * root_mem is the original ancestor that we've been reclaiming from.
   *
   * We give up and return to the caller when we visit root_mem twice.
   * (other groups can be removed while we're walking....)
   *
   * If shrink==true, this returns immediately, to avoid freeing too much.
   */
  static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
  						struct zone *zone,
  						gfp_t gfp_mask,
  						unsigned long reclaim_options,
  						unsigned long *total_scanned)
  {
  	struct mem_cgroup *victim;
  	int ret, total = 0;
  	int loop = 0;
  	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
  	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
  	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
  	unsigned long excess;
  	unsigned long nr_scanned;
  
  	excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;

  	/* If memsw_is_minimum==1, swap-out is of no use. */
  	if (!check_soft && !shrink && root_mem->memsw_is_minimum)
  		noswap = true;
  	while (1) {
  		victim = mem_cgroup_select_victim(root_mem);
  		if (victim == root_mem) {
  			loop++;
  			/*
  			 * We are not draining per-cpu cached charges during
  			 * soft limit reclaim because global reclaim doesn't
  			 * care about charges. It only tries to free some
  			 * memory, and draining charges would not free any.
  			 */
  			if (!check_soft && loop >= 1)
  				drain_all_stock_async(root_mem);
  			if (loop >= 2) {
  				/*
  				 * If we have not been able to reclaim
  				 * anything, it might be because there are
  				 * no reclaimable pages under this hierarchy
  				 */
  				if (!check_soft || !total) {
  					css_put(&victim->css);
  					break;
  				}
  				/*
  				 * We want to do more targeted reclaim.
  				 * excess >> 2 is not too excessive (so we do
  				 * not reclaim too much), nor too little (so we
  				 * don't keep coming back to reclaim from this
  				 * cgroup).
  				 */
  				if (total >= (excess >> 2) ||
  					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
  					css_put(&victim->css);
  					break;
  				}
  			}
  		}
  		if (!mem_cgroup_reclaimable(victim, noswap)) {
  			/* this cgroup's local usage == 0 */
  			css_put(&victim->css);
  			continue;
  		}
  		/* we use swappiness of local cgroup */
  		if (check_soft) {
  			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
  				noswap, zone, &nr_scanned);
  			*total_scanned += nr_scanned;
  		} else
  			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
  						noswap);
  		css_put(&victim->css);
  		/*
  		 * When shrinking usage, we can't check whether we should stop
  		 * here or reclaim more; that depends on the caller.
  		 * last_scanned_child is enough to keep fairness under the tree.
  		 */
  		if (shrink)
  			return ret;
  		total += ret;
  		if (check_soft) {
  			if (!res_counter_soft_limit_excess(&root_mem->res))
  				return total;
  		} else if (mem_cgroup_margin(root_mem))
  			return total;
  	}
  	return total;
  }
  /*
   * Check OOM-Killer is already running under our hierarchy.
   * If someone is running, return false.
   * Has to be called with memcg_oom_lock
   */
  static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
  {
  	struct mem_cgroup *iter, *failed = NULL;
  	bool cond = true;

  	for_each_mem_cgroup_tree_cond(iter, mem, cond) {
  		if (iter->oom_lock) {
  			/*
  			 * this subtree of our hierarchy is already locked
  			 * so we cannot give a lock.
  			 */
  			failed = iter;
  			cond = false;
  		} else
  			iter->oom_lock = true;
  	}

  	if (!failed)
  		return true;
  
  	/*
  	 * OK, we failed to lock the whole subtree so we have to clean up
  	 * what we have set up so far, up to the failing subtree
  	 */
  	cond = true;
  	for_each_mem_cgroup_tree_cond(iter, mem, cond) {
  		if (iter == failed) {
  			cond = false;
  			continue;
  		}
  		iter->oom_lock = false;
  	}
  	return false;
  }
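
  /*
   * Example for mem_cgroup_oom_lock(): in a hierarchy A -> {B, C} where B is
   * already oom-locked by another branch, locking from A stops at B and the
   * cleanup pass above clears the lock bit again on every group visited
   * before B, so only the original owner keeps it and this call returns false.
   */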

  /*
   * Has to be called with memcg_oom_lock
   */
  static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
  {
  	struct mem_cgroup *iter;
  	for_each_mem_cgroup_tree(iter, mem)
  		iter->oom_lock = false;
  	return 0;
  }
  
  static void mem_cgroup_mark_under_oom(struct mem_cgroup *mem)
  {
  	struct mem_cgroup *iter;
  
  	for_each_mem_cgroup_tree(iter, mem)
  		atomic_inc(&iter->under_oom);
  }
  
  static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
  {
  	struct mem_cgroup *iter;
  	/*
  	 * When a new child is created while the hierarchy is under oom,
  	 * mem_cgroup_oom_lock() may not be called. We have to use
  	 * atomic_add_unless() here.
  	 */
  	for_each_mem_cgroup_tree(iter, mem)
  		atomic_add_unless(&iter->under_oom, -1, 0);
  }
  static DEFINE_SPINLOCK(memcg_oom_lock);
  static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
  struct oom_wait_info {
  	struct mem_cgroup *mem;
  	wait_queue_t	wait;
  };
  
  static int memcg_oom_wake_function(wait_queue_t *wait,
  	unsigned mode, int sync, void *arg)
  {
  	struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg,
  			  *oom_wait_mem;
  	struct oom_wait_info *oom_wait_info;
  
  	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
  	oom_wait_mem = oom_wait_info->mem;

  	/*
  	 * Both of oom_wait_info->mem and wake_mem are stable under us.
  	 * Then we can use css_is_ancestor without taking care of RCU.
  	 */
  	if (!mem_cgroup_same_or_subtree(oom_wait_mem, wake_mem)
  			&& !mem_cgroup_same_or_subtree(wake_mem, oom_wait_mem))
  		return 0;
  	return autoremove_wake_function(wait, mode, sync, arg);
  }
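
  /*
   * Only waiters whose memcg is in the same hierarchy branch as the waking
   * memcg (an ancestor or a descendant of it) pass the filter above; everybody
   * else stays on the waitqueue.
   */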
  
  static void memcg_wakeup_oom(struct mem_cgroup *mem)
  {
  	/* for filtering, pass "mem" as argument. */
  	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
  }
  static void memcg_oom_recover(struct mem_cgroup *mem)
  {
  	if (mem && atomic_read(&mem->under_oom))
  		memcg_wakeup_oom(mem);
  }
  /*
   * try to call OOM killer. returns false if we should exit memory-reclaim loop.
   */
  bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
  {
  	struct oom_wait_info owait;
  	bool locked, need_to_kill;

  	owait.mem = mem;
  	owait.wait.flags = 0;
  	owait.wait.func = memcg_oom_wake_function;
  	owait.wait.private = current;
  	INIT_LIST_HEAD(&owait.wait.task_list);
  	need_to_kill = true;
  	mem_cgroup_mark_under_oom(mem);
  	/* At first, try to OOM lock hierarchy under mem.*/
  	spin_lock(&memcg_oom_lock);
  	locked = mem_cgroup_oom_lock(mem);
  	/*
  	 * Even if signal_pending(), we can't quit charge() loop without
  	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
  	 * under OOM is always welcomed, use TASK_KILLABLE here.
  	 */
  	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
  	if (!locked || mem->oom_kill_disable)
  		need_to_kill = false;
  	if (locked)
  		mem_cgroup_oom_notify(mem);
  	spin_unlock(&memcg_oom_lock);

  	if (need_to_kill) {
  		finish_wait(&memcg_oom_waitq, &owait.wait);
  		mem_cgroup_out_of_memory(mem, mask);
  	} else {
  		schedule();
  		finish_wait(&memcg_oom_waitq, &owait.wait);
  	}
  	spin_lock(&memcg_oom_lock);
  	if (locked)
  		mem_cgroup_oom_unlock(mem);
  	memcg_wakeup_oom(mem);
  	spin_unlock(&memcg_oom_lock);

  	mem_cgroup_unmark_under_oom(mem);
  	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
  		return false;
  	/* Give chance to dying process */
  	schedule_timeout(1);
  	return true;
  }
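
  /*
   * Summary of the protocol above: mark the hierarchy under_oom, try to take
   * the hierarchical oom_lock, then either invoke the OOM killer (if we got
   * the lock and oom_kill_disable is off) or sleep on memcg_oom_waitq until
   * woken. Returning false tells the charge path to give up because the
   * current task is already dying.
   */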
  /*
   * Currently used to update mapped file statistics, but the routine can be
   * generalized to update other statistics as well.
   *
   * Notes: Race condition
   *
   * We usually use page_cgroup_lock() for accessing page_cgroup member but
   * it tends to be costly. But considering some conditions, we doesn't need
   * to do so _always_.
   *
   * Considering "charge", lock_page_cgroup() is not required because all
   * file-stat operations happen after a page is attached to radix-tree. There
   * are no race with "charge".
   *
   * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
   * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
   * if there is a race with "uncharge". The statistics themselves are properly
   * handled by flags.
   *
   * Considering "move", this is the only case where we see a race. To make the
   * race window small, we check the MEM_CGROUP_ON_MOVE percpu value and detect
   * a possible race condition. If there is one, we take a lock.
   */

  void mem_cgroup_update_page_stat(struct page *page,
  				 enum mem_cgroup_page_stat_item idx, int val)
  {
  	struct mem_cgroup *mem;
  	struct page_cgroup *pc = lookup_page_cgroup(page);
  	bool need_unlock = false;
  	unsigned long uninitialized_var(flags);

  	if (unlikely(!pc))
  		return;
  	rcu_read_lock();
  	mem = pc->mem_cgroup;
  	if (unlikely(!mem || !PageCgroupUsed(pc)))
  		goto out;
  	/* pc->mem_cgroup is unstable ? */
  	if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
  		/* take a lock against to access pc->mem_cgroup */
  		move_lock_page_cgroup(pc, &flags);
  		need_unlock = true;
  		mem = pc->mem_cgroup;
  		if (!mem || !PageCgroupUsed(pc))
  			goto out;
  	}

  	switch (idx) {
  	case MEMCG_NR_FILE_MAPPED:
  		if (val > 0)
  			SetPageCgroupFileMapped(pc);
  		else if (!page_mapped(page))
  			ClearPageCgroupFileMapped(pc);
  		idx = MEM_CGROUP_STAT_FILE_MAPPED;
  		break;
  	default:
  		BUG();
  	}

  	this_cpu_add(mem->stat->count[idx], val);
  out:
  	if (unlikely(need_unlock))
  		move_unlock_page_cgroup(pc, &flags);
  	rcu_read_unlock();
  	return;
  }
  EXPORT_SYMBOL(mem_cgroup_update_page_stat);

  /*
   * size of first charge trial. "32" comes from vmscan.c's magic value.
   * TODO: it may be necessary to use bigger numbers on big iron.
   */
  #define CHARGE_BATCH	32U
  struct memcg_stock_pcp {
  	struct mem_cgroup *cached; /* this never be root cgroup */
  	unsigned int nr_pages;
  	struct work_struct work;
  	unsigned long flags;
  #define FLUSHING_CACHED_CHARGE	(0)
  };
  static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
  static DEFINE_MUTEX(percpu_charge_mutex);
  
  /*
   * Try to consume stocked charge on this cpu. If successful, one page is consumed
   * from the local stock and true is returned. If the stock is 0 or holds
   * charges from a cgroup which is not the current target, false is returned.
   * This stock will be refilled.
   */
  static bool consume_stock(struct mem_cgroup *mem)
  {
  	struct memcg_stock_pcp *stock;
  	bool ret = true;
  
  	stock = &get_cpu_var(memcg_stock);
  	if (mem == stock->cached && stock->nr_pages)
  		stock->nr_pages--;
  	else /* need to call res_counter_charge */
  		ret = false;
  	put_cpu_var(memcg_stock);
  	return ret;
  }
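
  /*
   * Together with refill_stock() below, this forms a small per-cpu cache: a
   * charger typically charges a whole CHARGE_BATCH against the res_counter
   * and parks the surplus locally, so subsequent single-page charges from the
   * same memcg on this cpu can avoid the res_counter entirely.
   */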
  
  /*
   * Returns stocks cached in percpu to res_counter and resets cached information.
   */
  static void drain_stock(struct memcg_stock_pcp *stock)
  {
  	struct mem_cgroup *old = stock->cached;
  	if (stock->nr_pages) {
  		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
  
  		res_counter_uncharge(&old->res, bytes);
  		if (do_swap_account)
  			res_counter_uncharge(&old->memsw, bytes);
  		stock->nr_pages = 0;
  	}
  	stock->cached = NULL;
  }
  
  /*
   * This must be called under preempt disabled or must be called by
   * a thread which is pinned to local cpu.
   */
  static void drain_local_stock(struct work_struct *dummy)
  {
  	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
  	drain_stock(stock);
  	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
  }
  
  /*
   * Cache charges(val), which come from res_counter, in the local per-cpu area.
   * This will be consumed by consume_stock() function, later.
   */
  static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
  {
  	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
  
  	if (stock->cached != mem) { /* reset if necessary */
  		drain_stock(stock);
  		stock->cached = mem;
  	}
  	stock->nr_pages += nr_pages;
  	put_cpu_var(memcg_stock);
  }
  
  /*
   * Drains all per-CPU charge caches for the given root_mem and the subtree
   * of the hierarchy under it. The sync flag says whether we should block
   * until the work is done.
   */
  static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
  {
  	int cpu, curcpu;

  	/* Notify other cpus that system-wide "drain" is running */
  	get_online_cpus();
  	curcpu = get_cpu();
  	for_each_online_cpu(cpu) {
  		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  		struct mem_cgroup *mem;
  		mem = stock->cached;
  		if (!mem || !stock->nr_pages)
  			continue;
  		if (!mem_cgroup_same_or_subtree(root_mem, mem))
  			continue;
  		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
  			if (cpu == curcpu)
  				drain_local_stock(&stock->work);
  			else
  				schedule_work_on(cpu, &stock->work);
  		}
  	}
  	put_cpu();
  
  	if (!sync)
  		goto out;
  
  	for_each_online_cpu(cpu) {
  		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
  		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
  			flush_work(&stock->work);
  	}
  out:
   	put_online_cpus();
  }
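
  /*
   * Only stocks belonging to root_mem's subtree are drained above; the local
   * cpu is drained directly while other cpus get a work item, and the sync
   * case additionally waits (flush_work) for every stock still flagged
   * FLUSHING_CACHED_CHARGE.
   */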
  
  /*
   * Tries to drain stocked charges in other cpus. This function is asynchronous
   * and just puts a work item per cpu to drain locally on each cpu. The caller
   * can expect some charges to go back to the res_counter later, but cannot
   * wait for that.
   */
  static void drain_all_stock_async(struct mem_cgroup *root_mem)
  {
  	/*
  	 * If someone calls draining, avoid adding more kworker runs.
  	 */
  	if (!mutex_trylock(&percpu_charge_mutex))
  		return;
  	drain_all_stock(root_mem, false);
  	mutex_unlock(&percpu_charge_mutex);
  }
  
  /* This is a synchronous drain interface. */
  static void drain_all_stock_sync(struct mem_cgroup *root_mem)
  {
  	/* called when force_empty is called */
  	mutex_lock(&percpu_charge_mutex);
  	drain_all_stock(root_mem, true);
  	mutex_unlock(&percpu_charge_mutex);
  }
  /*
   * This function drains the percpu counter value from a DEAD cpu and
   * moves it to the local cpu. Note that this function can be preempted.
   */
  static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
  {
  	int i;
  
  	spin_lock(&mem->pcp_counter_lock);
  	for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
  		long x = per_cpu(mem->stat->count[i], cpu);
  
  		per_cpu(mem->stat->count[i], cpu) = 0;
  		mem->nocpu_base.count[i] += x;
  	}
  	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
  		unsigned long x = per_cpu(mem->stat->events[i], cpu);
  
  		per_cpu(mem->stat->events[i], cpu) = 0;
  		mem->nocpu_base.events[i] += x;
  	}
  	/* need to clear ON_MOVE value, works as a kind of lock. */
  	per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
  	spin_unlock(&mem->pcp_counter_lock);
  }
  
  static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
  {
  	int idx = MEM_CGROUP_ON_MOVE;
  
  	spin_lock(&mem->pcp_counter_lock);
  	per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
  	spin_unlock(&mem->pcp_counter_lock);
  }
  
  static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
  					unsigned long action,
  					void *hcpu)
  {
  	int cpu = (unsigned long)hcpu;
  	struct memcg_stock_pcp *stock;
  	struct mem_cgroup *iter;

  	if ((action == CPU_ONLINE)) {
  		for_each_mem_cgroup_all(iter)
  			synchronize_mem_cgroup_on_move(iter, cpu);
  		return NOTIFY_OK;
  	}
  	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
  		return NOTIFY_OK;
  
  	for_each_mem_cgroup_all(iter)
  		mem_cgroup_drain_pcp_counter(iter, cpu);
  	stock = &per_cpu(memcg_stock, cpu);
  	drain_stock(stock);
  	return NOTIFY_OK;
  }
  
  /* See __mem_cgroup_try_charge() for details */
  enum {
  	CHARGE_OK,		/* success */
  	CHARGE_RETRY,		/* need to retry but retry is not bad */
  	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
  	CHARGE_WOULDBLOCK,	/* GFP_WAIT wasn't set and not enough res. */
  	CHARGE_OOM_DIE,		/* the current is killed because of OOM */
  };
  static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
  				unsigned int nr_pages, bool oom_check)
  {
  	unsigned long csize = nr_pages * PAGE_SIZE;
  	struct mem_cgroup *mem_over_limit;
  	struct res_counter *fail_res;
  	unsigned long flags = 0;
  	int ret;
  
  	ret = res_counter_charge(&mem->res, csize, &fail_res);
  
  	if (likely(!ret)) {
  		if (!do_swap_account)
  			return CHARGE_OK;
  		ret = res_counter_charge(&mem->memsw, csize, &fail_res);
  		if (likely(!ret))
  			return CHARGE_OK;
  		res_counter_uncharge(&mem->res, csize);
  		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
  		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
  	} else
  		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
  	/*
  	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
  	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
  	 *
  	 * Never reclaim on behalf of optional batching, retry with a
  	 * single page instead.
  	 */
  	if (nr_pages == CHARGE_BATCH)
  		return CHARGE_RETRY;
  
  	if (!(gfp_mask & __GFP_WAIT))
  		return CHARGE_WOULDBLOCK;
  
  	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
  					      gfp_mask, flags, NULL);
  	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
  		return CHARGE_RETRY;
  	/*
  	 * Even though the limit is exceeded at this point, reclaim
  	 * may have been able to free some pages.  Retry the charge
  	 * before killing the task.
  	 *
  	 * Only for regular pages, though: huge pages are rather
  	 * unlikely to succeed so close to the limit, and we fall back
  	 * to regular pages anyway in case of failure.
  	 */
  	if (nr_pages == 1 && ret)
  		return CHARGE_RETRY;
  
  	/*
  	 * At task move, charge accounts can be doubly counted. So, it's
  	 * better to wait until the end of task_move if something is going on.
  	 */
  	if (mem_cgroup_wait_acct_move(mem_over_limit))
  		return CHARGE_RETRY;
  
  	/* If we don't need to call the oom-killer at all, return immediately */
  	if (!oom_check)
  		return CHARGE_NOMEM;
  	/* check OOM */
  	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
  		return CHARGE_OOM_DIE;
  
  	return CHARGE_RETRY;
  }
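
  /*
   * In short: charge "res" first and then "memsw"; on failure pick the
   * counter that hit its limit, reclaim within that hierarchy, and report
   * CHARGE_RETRY / CHARGE_NOMEM / CHARGE_WOULDBLOCK / CHARGE_OOM_DIE back to
   * __mem_cgroup_try_charge() below.
   */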
  /*
   * Unlike the exported interface, an "oom" parameter is added. If oom==true,
   * the oom-killer can be invoked.
   */
  static int __mem_cgroup_try_charge(struct mm_struct *mm,
  				   gfp_t gfp_mask,
  				   unsigned int nr_pages,
  				   struct mem_cgroup **memcg,
  				   bool oom)
  {
  	unsigned int batch = max(CHARGE_BATCH, nr_pages);
  	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
  	struct mem_cgroup *mem = NULL;
  	int ret;

	/*
	 * Unlike the global VM's OOM kill, we're not in a memory shortage
	 * at the system level. So, allow a dying process to go ahead, in
	 * addition to MEMDIE processes.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)
		     || fatal_signal_pending(current)))
		goto bypass;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!*memcg && !mm)
		goto bypass;
again:
	if (*memcg) { /* css should be a valid one */
		mem = *memcg;
		VM_BUG_ON(css_is_removed(&mem->css));
		if (mem_cgroup_is_root(mem))
			goto done;
		if (nr_pages == 1 && consume_stock(mem))
			goto done;
		css_get(&mem->css);
	} else {
		struct task_struct *p;

		rcu_read_lock();
		p = rcu_dereference(mm->owner);
		/*
		 * Because we don't have task_lock(), "p" can exit.
		 * In that case, "mem" can point to root or p can be NULL with
		 * a race against swapoff. Then, we have a small risk of
		 * mis-accounting. But that kind of mis-accounting by race
		 * always happens because we don't hold cgroup_mutex(). It's
		 * overkill to prevent it, so we allow that small race here.
		 * (*) swapoff et al. will charge against the mm_struct, not
		 * against the task_struct. So, mm->owner can be NULL.
		 */
		mem = mem_cgroup_from_task(p);
		if (!mem || mem_cgroup_is_root(mem)) {
			rcu_read_unlock();
			goto done;
		}
		if (nr_pages == 1 && consume_stock(mem)) {
			/*
			 * It seems dangerous to access memcg without css_get().
			 * But considering how consume_stock works, it's not
			 * necessary. If consume_stock succeeds, some charges
			 * from this memcg are cached on this cpu. So, we
			 * don't need to call css_get()/css_tryget() before
			 * calling consume_stock().
			 */
			rcu_read_unlock();
			goto done;
		}
		/* after here, we may be blocked. we need to get refcnt */
		if (!css_tryget(&mem->css)) {
			rcu_read_unlock();
			goto again;
		}
		rcu_read_unlock();
	}

	do {
		bool oom_check;

		/* If killed, bypass charge */
		if (fatal_signal_pending(current)) {
			css_put(&mem->css);
			goto bypass;
		}

		oom_check = false;
		if (oom && !nr_oom_retries) {
			oom_check = true;
			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
		}

		ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
		switch (ret) {
		case CHARGE_OK:
			break;
		case CHARGE_RETRY: /* not in OOM situation but retry */
			batch = nr_pages;
			css_put(&mem->css);
			mem = NULL;
			goto again;
		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
			css_put(&mem->css);
			goto nomem;
		case CHARGE_NOMEM: /* OOM routine works */
			if (!oom) {
				css_put(&mem->css);
				goto nomem;
			}
			/* If oom, we never return -ENOMEM */
			nr_oom_retries--;
			break;
		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
			css_put(&mem->css);
			goto bypass;
		}
	} while (ret != CHARGE_OK);

	if (batch > nr_pages)
		refill_stock(mem, batch - nr_pages);
	css_put(&mem->css);
done:
	*memcg = mem;
	return 0;
nomem:
	*memcg = NULL;
	return -ENOMEM;
bypass:
	*memcg = NULL;
	return 0;
}
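
/*
 * A minimal sketch (illustrative only, not a verbatim caller from this file)
 * of the protocol expected around __mem_cgroup_try_charge(): a successful
 * charge is later either committed or cancelled, e.g.
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	if (__mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true))
 *		return -ENOMEM;			// charge failed
 *	if (mem) {				// NULL means the charge was bypassed
 *		...
 *		__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
 *		// or, on an error path: __mem_cgroup_cancel_charge(mem, 1);
 *	}
 */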

/*
 * Sometimes we have to undo a charge we got by try_charge().
 * This function is for that: it does the uncharge and puts the css refcnt
 * gotten by try_charge().
 */
static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
				       unsigned int nr_pages)
{
	if (!mem_cgroup_is_root(mem)) {
		unsigned long bytes = nr_pages * PAGE_SIZE;

		res_counter_uncharge(&mem->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&mem->memsw, bytes);
	}
}

/*
 * A helper function to get a mem_cgroup from its ID. It must be called
 * under rcu_read_lock(). The caller must check css_is_removed() or
 * similar if that is a concern (dropping the refcnt from swap can be
 * called against a removed memcg).
 */
static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
{
	struct cgroup_subsys_state *css;

	/* ID 0 is unused ID */
	if (!id)
		return NULL;
	css = css_lookup(&mem_cgroup_subsys, id);
	if (!css)
		return NULL;
	return container_of(css, struct mem_cgroup, css);
}
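
/*
 * A condensed sketch of how mem_cgroup_lookup() is used by the swap paths
 * further below (see __mem_cgroup_commit_charge_swapin() and
 * mem_cgroup_uncharge_swap()): the id comes from the swap_cgroup record and
 * the result is only touched under rcu_read_lock().
 *
 *	id = swap_cgroup_record(ent, 0);
 *	rcu_read_lock();
 *	memcg = mem_cgroup_lookup(id);
 *	if (memcg) {
 *		if (!mem_cgroup_is_root(memcg))
 *			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
 *		mem_cgroup_swap_statistics(memcg, false);
 *		mem_cgroup_put(memcg);
 *	}
 *	rcu_read_unlock();
 */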
  struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
b5a84319a   KAMEZAWA Hiroyuki   memcg: fix shmem'...
2331
  {
e42d9d5d4   Wu Fengguang   memcg: rename and...
2332
  	struct mem_cgroup *mem = NULL;
3c776e646   Daisuke Nishimura   memcg: charge swa...
2333
  	struct page_cgroup *pc;
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
2334
  	unsigned short id;
b5a84319a   KAMEZAWA Hiroyuki   memcg: fix shmem'...
2335
  	swp_entry_t ent;
3c776e646   Daisuke Nishimura   memcg: charge swa...
2336
  	VM_BUG_ON(!PageLocked(page));
3c776e646   Daisuke Nishimura   memcg: charge swa...
2337
  	pc = lookup_page_cgroup(page);
c0bd3f63c   Daisuke Nishimura   memcg: fix try_ge...
2338
  	lock_page_cgroup(pc);
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
2339
  	if (PageCgroupUsed(pc)) {
3c776e646   Daisuke Nishimura   memcg: charge swa...
2340
  		mem = pc->mem_cgroup;
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
2341
2342
  		if (mem && !css_tryget(&mem->css))
  			mem = NULL;
e42d9d5d4   Wu Fengguang   memcg: rename and...
2343
  	} else if (PageSwapCache(page)) {
3c776e646   Daisuke Nishimura   memcg: charge swa...
2344
  		ent.val = page_private(page);
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
2345
2346
2347
2348
2349
2350
  		id = lookup_swap_cgroup(ent);
  		rcu_read_lock();
  		mem = mem_cgroup_lookup(id);
  		if (mem && !css_tryget(&mem->css))
  			mem = NULL;
  		rcu_read_unlock();
3c776e646   Daisuke Nishimura   memcg: charge swa...
2351
  	}
c0bd3f63c   Daisuke Nishimura   memcg: fix try_ge...
2352
  	unlock_page_cgroup(pc);
b5a84319a   KAMEZAWA Hiroyuki   memcg: fix shmem'...
2353
2354
  	return mem;
  }
ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
2355
  static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
5564e88ba   Johannes Weiner   memcg: condense p...
2356
  				       struct page *page,
7ec99d621   Johannes Weiner   memcg: unify char...
2357
  				       unsigned int nr_pages,
ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
2358
  				       struct page_cgroup *pc,
7ec99d621   Johannes Weiner   memcg: unify char...
2359
  				       enum charge_type ctype)
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2360
  {
ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
2361
2362
2363
  	lock_page_cgroup(pc);
  	if (unlikely(PageCgroupUsed(pc))) {
  		unlock_page_cgroup(pc);
e7018b8d2   Johannes Weiner   memcg: keep only ...
2364
  		__mem_cgroup_cancel_charge(mem, nr_pages);
ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
2365
2366
2367
2368
2369
2370
  		return;
  	}
  	/*
	 * we don't need page_cgroup_lock about tail pages, because they are not
  	 * accessed by any other context at this point.
  	 */
8a9f3ccd2   Balbir Singh   Memory controller...
2371
  	pc->mem_cgroup = mem;
261fb61a8   KAMEZAWA Hiroyuki   memcg: add commen...
2372
2373
2374
2375
2376
2377
2378
  	/*
  	 * We access a page_cgroup asynchronously without lock_page_cgroup().
  	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
  	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
  	 * before USED bit, we need memory barrier here.
  	 * See mem_cgroup_add_lru_list(), etc.
   	 */
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
2379
  	smp_wmb();
4b3bde4c9   Balbir Singh   memcg: remove the...
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
  	switch (ctype) {
  	case MEM_CGROUP_CHARGE_TYPE_CACHE:
  	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
  		SetPageCgroupCache(pc);
  		SetPageCgroupUsed(pc);
  		break;
  	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
  		ClearPageCgroupCache(pc);
  		SetPageCgroupUsed(pc);
  		break;
  	default:
  		break;
  	}
3be91277e   Hugh Dickins   memcgroup: tidy u...
2393

ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
2394
  	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
2395
  	unlock_page_cgroup(pc);
430e48631   KAMEZAWA Hiroyuki   memcg: update thr...
2396
2397
2398
2399
2400
  	/*
  	 * "charge_statistics" updated event counter. Then, check it.
  	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
  	 * if they exceeds softlimit.
  	 */
5564e88ba   Johannes Weiner   memcg: condense p...
2401
  	memcg_check_events(mem, page);
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2402
  }
66e1707bc   Balbir Singh   Memory controller...
2403

ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  
  #define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
  			(1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
  /*
 * Because tail pages are not marked as "used", mark them here. We're under
 * zone->lru_lock, 'splitting on pmd' and compound_lock.
   */
  void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
  {
  	struct page_cgroup *head_pc = lookup_page_cgroup(head);
  	struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
  	unsigned long flags;
3d37c4a91   KAMEZAWA Hiroyuki   memcg: bugfix che...
2417
2418
  	if (mem_cgroup_disabled())
  		return;
ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
2419
  	/*
ece35ca81   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
2420
  	 * We have no races with charge/uncharge but will have races with
ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
2421
2422
2423
2424
2425
2426
  	 * page state accounting.
  	 */
  	move_lock_page_cgroup(head_pc, &flags);
  
  	tail_pc->mem_cgroup = head_pc->mem_cgroup;
  	smp_wmb(); /* see __commit_charge() */
ece35ca81   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
  	if (PageCgroupAcctLRU(head_pc)) {
  		enum lru_list lru;
  		struct mem_cgroup_per_zone *mz;
  
  		/*
		 * LRU flags cannot be copied because we need to add the tail
		 * page to the LRU by the generic call, where our hook will be
		 * called. We hold lru_lock, so reduce the counter directly.
  		 */
  		lru = page_lru(head);
97a6c37b3   Johannes Weiner   memcg: change pag...
2437
  		mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
ece35ca81   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
2438
2439
  		MEM_CGROUP_ZSTAT(mz, lru) -= 1;
  	}
ca3e02141   KAMEZAWA Hiroyuki   memcg: fix USED b...
2440
2441
2442
2443
  	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
  	move_unlock_page_cgroup(head_pc, &flags);
  }
  #endif
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
2444
/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @nr_pages: number of regular pages (>1 for huge pages)
 * @pc:	page_cgroup of the page.
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 * @uncharge: whether we should call uncharge and css_put against @from.
 *
 * The caller must confirm the following.
 * - page is not on LRU (isolate_page() is useful.)
 * - compound_lock is held when nr_pages > 1
 *
 * This function doesn't do "charge" nor css_get to the new cgroup. That
 * should be done by the caller (__mem_cgroup_try_charge would be useful).
 * If @uncharge is true, this function does "uncharge" from the old cgroup,
 * but it doesn't if @uncharge is false, so in that case the caller should
 * do the "uncharge".
 */
7ec99d621   Johannes Weiner   memcg: unify char...
2462
2463
2464
2465
2466
2467
  static int mem_cgroup_move_account(struct page *page,
  				   unsigned int nr_pages,
  				   struct page_cgroup *pc,
  				   struct mem_cgroup *from,
  				   struct mem_cgroup *to,
  				   bool uncharge)
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
2468
  {
de3638d9c   Johannes Weiner   memcg: fold __mem...
2469
2470
  	unsigned long flags;
  	int ret;
987eba66e   KAMEZAWA Hiroyuki   memcg: fix rmdir,...
2471

f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
2472
  	VM_BUG_ON(from == to);
5564e88ba   Johannes Weiner   memcg: condense p...
2473
  	VM_BUG_ON(PageLRU(page));
de3638d9c   Johannes Weiner   memcg: fold __mem...
2474
2475
2476
2477
2478
2479
2480
  	/*
  	 * The page is isolated from LRU. So, collapse function
  	 * will not handle this page. But page splitting can happen.
  	 * Do this check under compound_page_lock(). The caller should
  	 * hold it.
  	 */
  	ret = -EBUSY;
7ec99d621   Johannes Weiner   memcg: unify char...
2481
  	if (nr_pages > 1 && !PageTransHuge(page))
de3638d9c   Johannes Weiner   memcg: fold __mem...
2482
2483
2484
2485
2486
2487
2488
2489
2490
  		goto out;
  
  	lock_page_cgroup(pc);
  
  	ret = -EINVAL;
  	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
  		goto unlock;
  
  	move_lock_page_cgroup(pc, &flags);
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
2491

8725d5416   KAMEZAWA Hiroyuki   memcg: fix race i...
2492
  	if (PageCgroupFileMapped(pc)) {
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
2493
2494
2495
2496
2497
  		/* Update mapped_file data for mem_cgroup */
  		preempt_disable();
  		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
  		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
  		preempt_enable();
d69b042f3   Balbir Singh   memcg: add file-b...
2498
  	}
987eba66e   KAMEZAWA Hiroyuki   memcg: fix rmdir,...
2499
  	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
2500
2501
  	if (uncharge)
  		/* This is not "cancel", but cancel_charge does all we need. */
e7018b8d2   Johannes Weiner   memcg: keep only ...
2502
  		__mem_cgroup_cancel_charge(from, nr_pages);
d69b042f3   Balbir Singh   memcg: add file-b...
2503

854ffa8d1   Daisuke Nishimura   memcg: improve pe...
2504
  	/* caller should have done css_get */
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
2505
  	pc->mem_cgroup = to;
987eba66e   KAMEZAWA Hiroyuki   memcg: fix rmdir,...
2506
  	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
887032670   KAMEZAWA Hiroyuki   cgroup avoid perm...
2507
2508
2509
	/*
	 * We charge against "to", which may not have any tasks. Then, "to"
	 * can be under rmdir(). But in the current implementation, the
	 * callers of this function are just force_empty() and move charge,
	 * so it's guaranteed that "to" is never removed. So, we don't check
	 * rmdir status here.
	 */
de3638d9c   Johannes Weiner   memcg: fold __mem...
2514
2515
2516
  	move_unlock_page_cgroup(pc, &flags);
  	ret = 0;
  unlock:
57f9fd7d2   Daisuke Nishimura   memcg: cleanup me...
2517
  	unlock_page_cgroup(pc);
d2265e6fa   KAMEZAWA Hiroyuki   memcg : share eve...
2518
2519
2520
  	/*
  	 * check events
  	 */
5564e88ba   Johannes Weiner   memcg: condense p...
2521
2522
  	memcg_check_events(to, page);
  	memcg_check_events(from, page);
de3638d9c   Johannes Weiner   memcg: fold __mem...
2523
  out:
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
2524
2525
2526
2527
2528
2529
  	return ret;
  }
  
  /*
   * move charges to its parent.
   */
5564e88ba   Johannes Weiner   memcg: condense p...
2530
2531
  static int mem_cgroup_move_parent(struct page *page,
  				  struct page_cgroup *pc,
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
2532
2533
2534
2535
2536
2537
  				  struct mem_cgroup *child,
  				  gfp_t gfp_mask)
  {
  	struct cgroup *cg = child->css.cgroup;
  	struct cgroup *pcg = cg->parent;
  	struct mem_cgroup *parent;
7ec99d621   Johannes Weiner   memcg: unify char...
2538
  	unsigned int nr_pages;
4be4489fe   Andrew Morton   mm/memcontrol.c: ...
2539
  	unsigned long uninitialized_var(flags);
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
2540
2541
2542
2543
2544
  	int ret;
  
  	/* Is ROOT ? */
  	if (!pcg)
  		return -EINVAL;
57f9fd7d2   Daisuke Nishimura   memcg: cleanup me...
2545
2546
2547
2548
2549
  	ret = -EBUSY;
  	if (!get_page_unless_zero(page))
  		goto out;
  	if (isolate_lru_page(page))
  		goto put;
52dbb9050   KAMEZAWA Hiroyuki   memcg: fix race a...
2550

7ec99d621   Johannes Weiner   memcg: unify char...
2551
  	nr_pages = hpage_nr_pages(page);
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
2552

f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
2553
  	parent = mem_cgroup_from_cont(pcg);
7ec99d621   Johannes Weiner   memcg: unify char...
2554
  	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
a636b327f   KAMEZAWA Hiroyuki   memcg: avoid unne...
2555
  	if (ret || !parent)
57f9fd7d2   Daisuke Nishimura   memcg: cleanup me...
2556
  		goto put_back;
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
2557

7ec99d621   Johannes Weiner   memcg: unify char...
2558
  	if (nr_pages > 1)
987eba66e   KAMEZAWA Hiroyuki   memcg: fix rmdir,...
2559
  		flags = compound_lock_irqsave(page);
7ec99d621   Johannes Weiner   memcg: unify char...
2560
  	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
2561
  	if (ret)
7ec99d621   Johannes Weiner   memcg: unify char...
2562
  		__mem_cgroup_cancel_charge(parent, nr_pages);
8dba474f0   Jesper Juhl   mm/memcontrol.c: ...
2563

7ec99d621   Johannes Weiner   memcg: unify char...
2564
  	if (nr_pages > 1)
987eba66e   KAMEZAWA Hiroyuki   memcg: fix rmdir,...
2565
  		compound_unlock_irqrestore(page, flags);
8dba474f0   Jesper Juhl   mm/memcontrol.c: ...
2566
  put_back:
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
2567
  	putback_lru_page(page);
57f9fd7d2   Daisuke Nishimura   memcg: cleanup me...
2568
  put:
40d58138f   Daisuke Nishimura   memcg: fix error ...
2569
  	put_page(page);
57f9fd7d2   Daisuke Nishimura   memcg: cleanup me...
2570
  out:
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
2571
2572
  	return ret;
  }
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2573
2574
2575
2576
2577
2578
2579
  /*
   * Charge the memory controller for page usage.
   * Return
   * 0 if the charge was successful
   * < 0 if the cgroup is over its limit
   */
  static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
73045c47b   Daisuke Nishimura   memcg: remove mem...
2580
  				gfp_t gfp_mask, enum charge_type ctype)
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2581
  {
73045c47b   Daisuke Nishimura   memcg: remove mem...
2582
  	struct mem_cgroup *mem = NULL;
7ec99d621   Johannes Weiner   memcg: unify char...
2583
  	unsigned int nr_pages = 1;
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2584
  	struct page_cgroup *pc;
8493ae439   Johannes Weiner   memcg: never OOM ...
2585
  	bool oom = true;
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2586
  	int ret;
ec1685109   Andrea Arcangeli   thp: memcg compound
2587

37c2ac787   Andrea Arcangeli   thp: compound_tra...
2588
  	if (PageTransHuge(page)) {
7ec99d621   Johannes Weiner   memcg: unify char...
2589
  		nr_pages <<= compound_order(page);
37c2ac787   Andrea Arcangeli   thp: compound_tra...
2590
  		VM_BUG_ON(!PageTransHuge(page));
8493ae439   Johannes Weiner   memcg: never OOM ...
2591
2592
2593
2594
2595
  		/*
  		 * Never OOM-kill a process for a huge page.  The
  		 * fault handler will fall back to regular pages.
  		 */
  		oom = false;
37c2ac787   Andrea Arcangeli   thp: compound_tra...
2596
  	}
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2597
2598
  
  	pc = lookup_page_cgroup(page);
af4a66214   Johannes Weiner   memcg: remove NUL...
2599
  	BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2600

7ec99d621   Johannes Weiner   memcg: unify char...
2601
  	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
a636b327f   KAMEZAWA Hiroyuki   memcg: avoid unne...
2602
  	if (ret || !mem)
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2603
  		return ret;
7ec99d621   Johannes Weiner   memcg: unify char...
2604
  	__mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
8a9f3ccd2   Balbir Singh   Memory controller...
2605
  	return 0;
8a9f3ccd2   Balbir Singh   Memory controller...
2606
  }
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2607
2608
  int mem_cgroup_newpage_charge(struct page *page,
  			      struct mm_struct *mm, gfp_t gfp_mask)
217bc3194   KAMEZAWA Hiroyuki   memory cgroup enh...
2609
  {
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
2610
  	if (mem_cgroup_disabled())
cede86acd   Li Zefan   memcg: clean up c...
2611
  		return 0;
69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has an address_space.
	 * But page->mapping may have an out-of-use anon_vma pointer;
	 * detect that by the PageAnon() check. A newly-mapped-anon's
	 * page->mapping is NULL.
	 */
  	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
  		return 0;
  	if (unlikely(!mm))
  		mm = &init_mm;
217bc3194   KAMEZAWA Hiroyuki   memory cgroup enh...
2623
  	return mem_cgroup_charge_common(page, mm, gfp_mask,
73045c47b   Daisuke Nishimura   memcg: remove mem...
2624
  				MEM_CGROUP_CHARGE_TYPE_MAPPED);
217bc3194   KAMEZAWA Hiroyuki   memory cgroup enh...
2625
  }
83aae4c73   Daisuke Nishimura   memcg: cleanup ca...
2626
2627
2628
  static void
  __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
  					enum charge_type ctype);
5a6475a4e   KAMEZAWA Hiroyuki   memcg: fix leak o...
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
  static void
  __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
  					enum charge_type ctype)
  {
  	struct page_cgroup *pc = lookup_page_cgroup(page);
  	/*
	 * In some cases, e.g. SwapCache and FUSE(splice_buf->radixtree), the
	 * page is already on the LRU. It means the page may be on some other
	 * page_cgroup's LRU. Take care of it.
  	 */
  	mem_cgroup_lru_del_before_commit(page);
  	__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
  	mem_cgroup_lru_add_after_commit(page);
  	return;
  }
e1a1cd590   Balbir Singh   Memory controller...
2644
2645
  int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
  				gfp_t gfp_mask)
8697d3319   Balbir Singh   Memory controller...
2646
  {
5a6475a4e   KAMEZAWA Hiroyuki   memcg: fix leak o...
2647
  	struct mem_cgroup *mem = NULL;
b5a84319a   KAMEZAWA Hiroyuki   memcg: fix shmem'...
2648
  	int ret;
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
2649
  	if (mem_cgroup_disabled())
cede86acd   Li Zefan   memcg: clean up c...
2650
  		return 0;
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
2651
2652
  	if (PageCompound(page))
  		return 0;
accf163e6   KAMEZAWA Hiroyuki   memcg: remove a r...
2653

73045c47b   Daisuke Nishimura   memcg: remove mem...
2654
  	if (unlikely(!mm))
8697d3319   Balbir Singh   Memory controller...
2655
  		mm = &init_mm;
accf163e6   KAMEZAWA Hiroyuki   memcg: remove a r...
2656

5a6475a4e   KAMEZAWA Hiroyuki   memcg: fix leak o...
2657
2658
2659
2660
  	if (page_is_file_cache(page)) {
  		ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
  		if (ret || !mem)
  			return ret;
b5a84319a   KAMEZAWA Hiroyuki   memcg: fix shmem'...
2661

5a6475a4e   KAMEZAWA Hiroyuki   memcg: fix leak o...
2662
2663
2664
2665
2666
2667
2668
2669
2670
  		/*
  		 * FUSE reuses pages without going through the final
  		 * put that would remove them from the LRU list, make
  		 * sure that they get relinked properly.
  		 */
  		__mem_cgroup_commit_charge_lrucare(page, mem,
  					MEM_CGROUP_CHARGE_TYPE_CACHE);
  		return ret;
  	}
83aae4c73   Daisuke Nishimura   memcg: cleanup ca...
2671
2672
2673
2674
2675
2676
2677
2678
  	/* shmem */
  	if (PageSwapCache(page)) {
  		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
  		if (!ret)
  			__mem_cgroup_commit_charge_swapin(page, mem,
  					MEM_CGROUP_CHARGE_TYPE_SHMEM);
  	} else
  		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
73045c47b   Daisuke Nishimura   memcg: remove mem...
2679
  					MEM_CGROUP_CHARGE_TYPE_SHMEM);
b5a84319a   KAMEZAWA Hiroyuki   memcg: fix shmem'...
2680

b5a84319a   KAMEZAWA Hiroyuki   memcg: fix shmem'...
2681
  	return ret;
e8589cc18   KAMEZAWA Hiroyuki   memcg: better mig...
2682
  }
54595fe26   KAMEZAWA Hiroyuki   memcg: use css_tr...
2683
2684
2685
/*
 * During swap-in (try_charge -> commit or cancel), the page is locked.
 * And when try_charge() successfully returns, one refcnt to the memcg,
 * without a struct page_cgroup, is acquired. This refcnt will be consumed
 * by "commit()" or released by "cancel()".
 */
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2689
2690
2691
2692
2693
  int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
  				 struct page *page,
  				 gfp_t mask, struct mem_cgroup **ptr)
  {
  	struct mem_cgroup *mem;
54595fe26   KAMEZAWA Hiroyuki   memcg: use css_tr...
2694
  	int ret;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2695

56039efa1   KAMEZAWA Hiroyuki   memcg: fix ugly i...
2696
  	*ptr = NULL;
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
2697
  	if (mem_cgroup_disabled())
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2698
2699
2700
2701
  		return 0;
  
  	if (!do_swap_account)
  		goto charge_cur_mm;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2702
2703
  	/*
  	 * A racing thread's fault, or swapoff, may have already updated
407f9c8b0   Hugh Dickins   ksm: mem cgroup c...
2704
2705
2706
  	 * the pte, and even removed page from swap cache: in those cases
  	 * do_swap_page()'s pte_same() test will fail; but there's also a
  	 * KSM case which does need to charge the page.
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2707
2708
  	 */
  	if (!PageSwapCache(page))
407f9c8b0   Hugh Dickins   ksm: mem cgroup c...
2709
  		goto charge_cur_mm;
e42d9d5d4   Wu Fengguang   memcg: rename and...
2710
  	mem = try_get_mem_cgroup_from_page(page);
54595fe26   KAMEZAWA Hiroyuki   memcg: use css_tr...
2711
2712
  	if (!mem)
  		goto charge_cur_mm;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2713
  	*ptr = mem;
7ec99d621   Johannes Weiner   memcg: unify char...
2714
  	ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
54595fe26   KAMEZAWA Hiroyuki   memcg: use css_tr...
2715
2716
  	css_put(&mem->css);
  	return ret;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2717
2718
2719
charge_cur_mm:
	if (unlikely(!mm))
		mm = &init_mm;
	return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
}
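
/*
 * A condensed sketch of the swap-in charge protocol described above
 * (illustrative only; the in-tree caller is do_swap_page() in mm/memory.c):
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out_of_memory;
 *	...					// map the page
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 *	// or, if the fault is aborted after the try_charge:
 *	// mem_cgroup_cancel_charge_swapin(ptr);
 */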
  static void
  __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
  					enum charge_type ctype)
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2725
  {
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
2726
  	if (mem_cgroup_disabled())
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2727
2728
2729
  		return;
  	if (!ptr)
  		return;
887032670   KAMEZAWA Hiroyuki   cgroup avoid perm...
2730
  	cgroup_exclude_rmdir(&ptr->css);
5a6475a4e   KAMEZAWA Hiroyuki   memcg: fix leak o...
2731
2732
  
  	__mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2733
2734
2735
	/*
	 * Now the swap is in memory. This means this page may be
	 * counted both as mem and swap -- a double count.
	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
	 * may call delete_from_swap_cache() before we reach here.
	 */
03f3c4336   KAMEZAWA Hiroyuki   memcg: fix swap a...
2740
  	if (do_swap_account && PageSwapCache(page)) {
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2741
  		swp_entry_t ent = {.val = page_private(page)};
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
2742
  		unsigned short id;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2743
  		struct mem_cgroup *memcg;
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
2744
2745
2746
2747
  
  		id = swap_cgroup_record(ent, 0);
  		rcu_read_lock();
  		memcg = mem_cgroup_lookup(id);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2748
  		if (memcg) {
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
2749
2750
2751
2752
  			/*
  			 * This recorded memcg can be obsolete one. So, avoid
  			 * calling css_tryget
  			 */
0c3e73e84   Balbir Singh   memcg: improve re...
2753
  			if (!mem_cgroup_is_root(memcg))
4e649152c   KAMEZAWA Hiroyuki   memcg: some modif...
2754
  				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
0c3e73e84   Balbir Singh   memcg: improve re...
2755
  			mem_cgroup_swap_statistics(memcg, false);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2756
2757
  			mem_cgroup_put(memcg);
  		}
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
2758
  		rcu_read_unlock();
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2759
  	}
887032670   KAMEZAWA Hiroyuki   cgroup avoid perm...
2760
2761
2762
2763
2764
2765
  	/*
  	 * At swapin, we may charge account against cgroup which has no tasks.
  	 * So, rmdir()->pre_destroy() can be called while we do this charge.
  	 * In that case, we need to call pre_destroy() again. check it here.
  	 */
  	cgroup_release_and_wakeup_rmdir(&ptr->css);
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2766
  }
83aae4c73   Daisuke Nishimura   memcg: cleanup ca...
2767
2768
2769
2770
2771
  void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
  {
  	__mem_cgroup_commit_charge_swapin(page, ptr,
  					MEM_CGROUP_CHARGE_TYPE_MAPPED);
  }
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2772
2773
  void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
  {
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
2774
  	if (mem_cgroup_disabled())
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2775
2776
2777
  		return;
  	if (!mem)
  		return;
e7018b8d2   Johannes Weiner   memcg: keep only ...
2778
  	__mem_cgroup_cancel_charge(mem, 1);
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2779
  }
7ec99d621   Johannes Weiner   memcg: unify char...
2780
2781
2782
  static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
  				   unsigned int nr_pages,
  				   const enum charge_type ctype)
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
2783
2784
2785
  {
  	struct memcg_batch_info *batch = NULL;
  	bool uncharge_memsw = true;
7ec99d621   Johannes Weiner   memcg: unify char...
2786

569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
2787
2788
2789
  	/* If swapout, usage of swap doesn't decrease */
  	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
  		uncharge_memsw = false;
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
  
  	batch = &current->memcg_batch;
  	/*
	 * Usually, we do css_get() when we remember a memcg pointer.
  	 * But in this case, we keep res->usage until end of a series of
  	 * uncharges. Then, it's ok to ignore memcg's refcnt.
  	 */
  	if (!batch->memcg)
  		batch->memcg = mem;
  	/*
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
2800
  	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
25985edce   Lucas De Marchi   Fix common misspe...
2801
  	 * In those cases, all pages freed continuously can be expected to be in
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
2802
2803
2804
2805
2806
2807
2808
  	 * the same cgroup and we have chance to coalesce uncharges.
  	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
  	 * because we want to do uncharge as soon as possible.
  	 */
  
  	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
  		goto direct_uncharge;
7ec99d621   Johannes Weiner   memcg: unify char...
2809
  	if (nr_pages > 1)
ec1685109   Andrea Arcangeli   thp: memcg compound
2810
  		goto direct_uncharge;
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
2811
  	/*
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
2812
2813
2814
2815
2816
2817
2818
	 * In the typical case, batch->memcg == mem. This means we can
	 * merge a series of uncharges into one uncharge of the res_counter.
	 * If not, we uncharge the res_counter one by one.
  	 */
  	if (batch->memcg != mem)
  		goto direct_uncharge;
  	/* remember freed charge and uncharge it later */
7ffd4ca7a   Johannes Weiner   memcg: convert un...
2819
  	batch->nr_pages++;
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
2820
  	if (uncharge_memsw)
7ffd4ca7a   Johannes Weiner   memcg: convert un...
2821
  		batch->memsw_nr_pages++;
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
2822
2823
  	return;
  direct_uncharge:
7ec99d621   Johannes Weiner   memcg: unify char...
2824
  	res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
2825
  	if (uncharge_memsw)
7ec99d621   Johannes Weiner   memcg: unify char...
2826
  		res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
2827
2828
  	if (unlikely(batch->memcg != mem))
  		memcg_oom_recover(mem);
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
2829
2830
  	return;
  }
7a81b88cb   KAMEZAWA Hiroyuki   memcg: introduce ...
2831

8697d3319   Balbir Singh   Memory controller...
2832
  /*
69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
2833
   * uncharge if !page_mapped(page)
8a9f3ccd2   Balbir Singh   Memory controller...
2834
   */
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2835
  static struct mem_cgroup *
69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
2836
  __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
8a9f3ccd2   Balbir Singh   Memory controller...
2837
  {
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2838
  	struct mem_cgroup *mem = NULL;
7ec99d621   Johannes Weiner   memcg: unify char...
2839
2840
  	unsigned int nr_pages = 1;
  	struct page_cgroup *pc;
8a9f3ccd2   Balbir Singh   Memory controller...
2841

f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
2842
  	if (mem_cgroup_disabled())
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2843
  		return NULL;
4077960e2   Balbir Singh   memory controller...
2844

d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
2845
  	if (PageSwapCache(page))
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2846
  		return NULL;
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
2847

37c2ac787   Andrea Arcangeli   thp: compound_tra...
2848
  	if (PageTransHuge(page)) {
7ec99d621   Johannes Weiner   memcg: unify char...
2849
  		nr_pages <<= compound_order(page);
37c2ac787   Andrea Arcangeli   thp: compound_tra...
2850
2851
  		VM_BUG_ON(!PageTransHuge(page));
  	}
8697d3319   Balbir Singh   Memory controller...
2852
  	/*
3c541e14b   Balbir Singh   Memory controller...
2853
  	 * Check if our page_cgroup is valid
8697d3319   Balbir Singh   Memory controller...
2854
  	 */
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
2855
2856
  	pc = lookup_page_cgroup(page);
  	if (unlikely(!pc || !PageCgroupUsed(pc)))
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2857
  		return NULL;
b9c565d5a   Hugh Dickins   memcg: remove cle...
2858

52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
2859
  	lock_page_cgroup(pc);
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
2860

8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2861
  	mem = pc->mem_cgroup;
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
2862
2863
2864
2865
2866
  	if (!PageCgroupUsed(pc))
  		goto unlock_out;
  
  	switch (ctype) {
  	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
8a9478ca7   KAMEZAWA Hiroyuki   memcg: fix swap a...
2867
  	case MEM_CGROUP_CHARGE_TYPE_DROP:
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
2868
2869
  		/* See mem_cgroup_prepare_migration() */
  		if (page_mapped(page) || PageCgroupMigration(pc))
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
  			goto unlock_out;
  		break;
  	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
  		if (!PageAnon(page)) {	/* Shared memory */
  			if (page->mapping && !page_is_file_cache(page))
  				goto unlock_out;
  		} else if (page_mapped(page)) /* Anon */
  				goto unlock_out;
  		break;
  	default:
  		break;
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
2881
  	}
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
2882

7ec99d621   Johannes Weiner   memcg: unify char...
2883
  	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);
04046e1a0   KAMEZAWA Hiroyuki   memcg: use CSS ID
2884

52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
2885
  	ClearPageCgroupUsed(pc);
544122e5e   KAMEZAWA Hiroyuki   memcg: fix LRU ac...
2886
2887
2888
2889
2890
2891
  	/*
  	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
  	 * freed from LRU. This is safe because uncharged page is expected not
  	 * to be reused (freed soon). Exception is SwapCache, it's handled by
  	 * special functions.
  	 */
b9c565d5a   Hugh Dickins   memcg: remove cle...
2892

52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
2893
  	unlock_page_cgroup(pc);
f75ca9620   KAMEZAWA Hiroyuki   memcg: avoid css_...
2894
2895
2896
2897
  	/*
  	 * even after unlock, we have mem->res.usage here and this memcg
  	 * will never be freed.
  	 */
d2265e6fa   KAMEZAWA Hiroyuki   memcg : share eve...
2898
  	memcg_check_events(mem, page);
f75ca9620   KAMEZAWA Hiroyuki   memcg: avoid css_...
2899
2900
2901
2902
2903
  	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
  		mem_cgroup_swap_statistics(mem, true);
  		mem_cgroup_get(mem);
  	}
  	if (!mem_cgroup_is_root(mem))
7ec99d621   Johannes Weiner   memcg: unify char...
2904
  		mem_cgroup_do_uncharge(mem, nr_pages, ctype);
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
2905

8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2906
  	return mem;
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
2907
2908
2909
  
  unlock_out:
  	unlock_page_cgroup(pc);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2910
  	return NULL;
3c541e14b   Balbir Singh   Memory controller...
2911
  }
69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
2912
2913
  void mem_cgroup_uncharge_page(struct page *page)
  {
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
2914
2915
2916
2917
2918
  	/* early check. */
  	if (page_mapped(page))
  		return;
  	if (page->mapping && !PageAnon(page))
  		return;
69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
2919
2920
2921
2922
2923
2924
  	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
  }
  
  void mem_cgroup_uncharge_cache_page(struct page *page)
  {
  	VM_BUG_ON(page_mapped(page));
b7abea963   KAMEZAWA Hiroyuki   memcg: make page-...
2925
  	VM_BUG_ON(page->mapping);
69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
2926
2927
  	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
  }
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
/*
 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
 * In those cases, pages are freed continuously and we can expect the pages
 * to be in the same memcg. Each of these callers itself limits the number
 * of pages freed at once, so uncharge_start/end() is called properly.
 * This may be called several (2 or more) times in a single context.
 */
  
  void mem_cgroup_uncharge_start(void)
  {
  	current->memcg_batch.do_batch++;
  	/* We can do nest. */
  	if (current->memcg_batch.do_batch == 1) {
  		current->memcg_batch.memcg = NULL;
7ffd4ca7a   Johannes Weiner   memcg: convert un...
2942
2943
  		current->memcg_batch.nr_pages = 0;
  		current->memcg_batch.memsw_nr_pages = 0;
569b846df   KAMEZAWA Hiroyuki   memcg: coalesce u...
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
  	}
  }
  
  void mem_cgroup_uncharge_end(void)
  {
  	struct memcg_batch_info *batch = &current->memcg_batch;
  
  	if (!batch->do_batch)
  		return;
  
  	batch->do_batch--;
  	if (batch->do_batch) /* If stacked, do nothing. */
  		return;
  
  	if (!batch->memcg)
  		return;
  	/*
  	 * This "batch->memcg" is valid without any css_get/put etc...
  	 * bacause we hide charges behind us.
  	 */
7ffd4ca7a   Johannes Weiner   memcg: convert un...
2964
2965
2966
2967
2968
2969
  	if (batch->nr_pages)
  		res_counter_uncharge(&batch->memcg->res,
  				     batch->nr_pages * PAGE_SIZE);
  	if (batch->memsw_nr_pages)
  		res_counter_uncharge(&batch->memcg->memsw,
  				     batch->memsw_nr_pages * PAGE_SIZE);
	memcg_oom_recover(batch->memcg);
	/* forget this pointer (for sanity check) */
	batch->memcg = NULL;
}
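
/*
 * A minimal sketch of the batching protocol above (illustrative only):
 * callers that free many pages in a row bracket the frees, so the per-page
 * uncharges are coalesced into a single res_counter update at the end.
 *
 *	mem_cgroup_uncharge_start();
 *	for_each_page_to_free(page)		// pseudo-iteration
 *		mem_cgroup_uncharge_page(page);	// or mem_cgroup_uncharge_cache_page()
 *	mem_cgroup_uncharge_end();
 */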
  #ifdef CONFIG_SWAP
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2975
  /*
e767e0561   Daisuke Nishimura   memcg: fix deadlo...
2976
   * called after __delete_from_swap_cache() and drop "page" account.
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2977
2978
   * memcg information is recorded to swap_cgroup of "ent"
   */
8a9478ca7   KAMEZAWA Hiroyuki   memcg: fix swap a...
2979
2980
  void
  mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2981
2982
  {
  	struct mem_cgroup *memcg;
8a9478ca7   KAMEZAWA Hiroyuki   memcg: fix swap a...
2983
2984
2985
2986
2987
2988
  	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
  
  	if (!swapout) /* this was a swap cache but the swap is unused ! */
  		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
  
  	memcg = __mem_cgroup_uncharge_common(page, ctype);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2989

f75ca9620   KAMEZAWA Hiroyuki   memcg: avoid css_...
2990
2991
2992
2993
2994
  	/*
  	 * record memcg information,  if swapout && memcg != NULL,
  	 * mem_cgroup_get() was called in uncharge().
  	 */
  	if (do_swap_account && swapout && memcg)
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
2995
  		swap_cgroup_record(ent, css_id(&memcg->css));
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2996
  }
e767e0561   Daisuke Nishimura   memcg: fix deadlo...
2997
  #endif
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
2998
2999
3000
3001
3002
3003
3004
  
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  /*
   * called from swap_entry_free(). remove record in swap_cgroup and
   * uncharge "memsw" account.
   */
  void mem_cgroup_uncharge_swap(swp_entry_t ent)
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
3005
  {
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3006
  	struct mem_cgroup *memcg;
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
3007
  	unsigned short id;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3008
3009
3010
  
  	if (!do_swap_account)
  		return;
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
3011
3012
3013
  	id = swap_cgroup_record(ent, 0);
  	rcu_read_lock();
  	memcg = mem_cgroup_lookup(id);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3014
  	if (memcg) {
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
3015
3016
3017
3018
  		/*
  		 * We uncharge this because swap is freed.
  		 * This memcg can be obsolete one. We avoid calling css_tryget
  		 */
0c3e73e84   Balbir Singh   memcg: improve re...
3019
  		if (!mem_cgroup_is_root(memcg))
4e649152c   KAMEZAWA Hiroyuki   memcg: some modif...
3020
  			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
0c3e73e84   Balbir Singh   memcg: improve re...
3021
  		mem_cgroup_swap_statistics(memcg, false);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3022
3023
  		mem_cgroup_put(memcg);
  	}
a3b2d6926   KAMEZAWA Hiroyuki   cgroups: use css ...
3024
  	rcu_read_unlock();
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
3025
  }
024914477   Daisuke Nishimura   memcg: move charg...
3026
3027
3028
3029
3030
3031
  
  /**
   * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
   * @entry: swap entry to be moved
   * @from:  mem_cgroup which the entry is moved from
   * @to:  mem_cgroup which the entry is moved to
483c30b51   Daisuke Nishimura   memcg: improve pe...
3032
   * @need_fixup: whether we should fixup res_counters and refcounts.
024914477   Daisuke Nishimura   memcg: move charg...
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
   *
   * It succeeds only when the swap_cgroup's record for this entry is the same
   * as the mem_cgroup's id of @from.
   *
   * Returns 0 on success, -EINVAL on failure.
   *
   * The caller must have charged to @to, IOW, called res_counter_charge() about
   * both res and memsw, and called css_get().
   */
  static int mem_cgroup_move_swap_account(swp_entry_t entry,
483c30b51   Daisuke Nishimura   memcg: improve pe...
3043
  		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
024914477   Daisuke Nishimura   memcg: move charg...
3044
3045
3046
3047
3048
3049
3050
  {
  	unsigned short old_id, new_id;
  
  	old_id = css_id(&from->css);
  	new_id = css_id(&to->css);
  
  	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
024914477   Daisuke Nishimura   memcg: move charg...
3051
  		mem_cgroup_swap_statistics(from, false);
483c30b51   Daisuke Nishimura   memcg: improve pe...
3052
  		mem_cgroup_swap_statistics(to, true);
024914477   Daisuke Nishimura   memcg: move charg...
3053
  		/*
483c30b51   Daisuke Nishimura   memcg: improve pe...
3054
3055
3056
3057
3058
3059
  		 * This function is only called from task migration context now.
  		 * It postpones res_counter and refcount handling till the end
  		 * of task migration(mem_cgroup_clear_mc()) for performance
  		 * improvement. But we cannot postpone mem_cgroup_get(to)
  		 * because if the process that has been moved to @to does
  		 * swap-in, the refcount of @to might be decreased to 0.
024914477   Daisuke Nishimura   memcg: move charg...
3060
  		 */
024914477   Daisuke Nishimura   memcg: move charg...
3061
  		mem_cgroup_get(to);
483c30b51   Daisuke Nishimura   memcg: improve pe...
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
  		if (need_fixup) {
  			if (!mem_cgroup_is_root(from))
  				res_counter_uncharge(&from->memsw, PAGE_SIZE);
  			mem_cgroup_put(from);
  			/*
  			 * we charged both to->res and to->memsw, so we should
  			 * uncharge to->res.
  			 */
  			if (!mem_cgroup_is_root(to))
  				res_counter_uncharge(&to->res, PAGE_SIZE);
483c30b51   Daisuke Nishimura   memcg: improve pe...
3072
  		}
024914477   Daisuke Nishimura   memcg: move charg...
3073
3074
3075
3076
3077
3078
  		return 0;
  	}
  	return -EINVAL;
  }
  #else
  static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
483c30b51   Daisuke Nishimura   memcg: improve pe...
3079
  		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
024914477   Daisuke Nishimura   memcg: move charg...
3080
3081
3082
  {
  	return -EINVAL;
  }
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3083
  #endif
d13d14430   KAMEZAWA Hiroyuki   memcg: handle swa...
3084

ae41be374   KAMEZAWA Hiroyuki   bugfix for memory...
3085
  /*
01b1ae63c   KAMEZAWA Hiroyuki   memcg: simple mig...
3086
3087
   * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
   * page belongs to.
ae41be374   KAMEZAWA Hiroyuki   bugfix for memory...
3088
   */
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3089
  int mem_cgroup_prepare_migration(struct page *page,
ef6a3c631   Miklos Szeredi   mm: add replace_p...
3090
  	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
ae41be374   KAMEZAWA Hiroyuki   bugfix for memory...
3091
  {
e8589cc18   KAMEZAWA Hiroyuki   memcg: better mig...
3092
  	struct mem_cgroup *mem = NULL;
7ec99d621   Johannes Weiner   memcg: unify char...
3093
  	struct page_cgroup *pc;
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3094
  	enum charge_type ctype;
e8589cc18   KAMEZAWA Hiroyuki   memcg: better mig...
3095
  	int ret = 0;
8869b8f6e   Hugh Dickins   memcg: memcontrol...
3096

56039efa1   KAMEZAWA Hiroyuki   memcg: fix ugly i...
3097
  	*ptr = NULL;
ec1685109   Andrea Arcangeli   thp: memcg compound
3098
  	VM_BUG_ON(PageTransHuge(page));
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
3099
  	if (mem_cgroup_disabled())
4077960e2   Balbir Singh   memory controller...
3100
  		return 0;
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
3101
3102
3103
  	pc = lookup_page_cgroup(page);
  	lock_page_cgroup(pc);
  	if (PageCgroupUsed(pc)) {
e8589cc18   KAMEZAWA Hiroyuki   memcg: better mig...
3104
3105
  		mem = pc->mem_cgroup;
  		css_get(&mem->css);
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
  		/*
  		 * At migrating an anonymous page, its mapcount goes down
  		 * to 0 and uncharge() will be called. But, even if it's fully
  		 * unmapped, migration may fail and this page has to be
  		 * charged again. We set MIGRATION flag here and delay uncharge
  		 * until end_migration() is called
  		 *
  		 * Corner Case Thinking
  		 * A)
  		 * When the old page was mapped as Anon and it's unmap-and-freed
  		 * while migration was ongoing.
  		 * If unmap finds the old page, uncharge() of it will be delayed
  		 * until end_migration(). If unmap finds a new page, it's
		 * uncharged when it makes the mapcount go 1->0. If the unmap code
		 * finds swap_migration_entry, the new page will not be mapped
		 * and end_migration() will find it (mapcount==0).
  		 *
  		 * B)
		 * When the old page was mapped but migration fails, the kernel
  		 * remaps it. A charge for it is kept by MIGRATION flag even
  		 * if mapcount goes down to 0. We can do remap successfully
  		 * without charging it again.
  		 *
  		 * C)
  		 * The "old" page is under lock_page() until the end of
  		 * migration, so, the old page itself will not be swapped-out.
		 * If the new page is swapped out before end_migration, our
  		 * hook to usual swap-out path will catch the event.
  		 */
  		if (PageAnon(page))
  			SetPageCgroupMigration(pc);
e8589cc18   KAMEZAWA Hiroyuki   memcg: better mig...
3137
  	}
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
3138
  	unlock_page_cgroup(pc);
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3139
3140
3141
3142
3143
3144
  	/*
  	 * If the page is not charged at this point,
  	 * we return here.
  	 */
  	if (!mem)
  		return 0;
01b1ae63c   KAMEZAWA Hiroyuki   memcg: simple mig...
3145

93d5c9be1   Andrea Arcangeli   memcg: fix prepar...
3146
  	*ptr = mem;
7ec99d621   Johannes Weiner   memcg: unify char...
3147
  	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
  	css_put(&mem->css);/* drop extra refcnt */
  	if (ret || *ptr == NULL) {
  		if (PageAnon(page)) {
  			lock_page_cgroup(pc);
  			ClearPageCgroupMigration(pc);
  			unlock_page_cgroup(pc);
  			/*
  			 * The old page may be fully unmapped while we kept it.
  			 */
  			mem_cgroup_uncharge_page(page);
  		}
  		return -ENOMEM;
e8589cc18   KAMEZAWA Hiroyuki   memcg: better mig...
3160
  	}
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
  	/*
  	 * We charge the new page before it is used/mapped. So, even if
  	 * unlock_page() is called before end_migration(), we can catch all
  	 * events on this new page. If the new page is migrated but never
  	 * remapped, its mapcount ends up 0 and we uncharge it in end_migration().
  	 */
  	pc = lookup_page_cgroup(newpage);
  	if (PageAnon(page))
  		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
  	else if (page_is_file_cache(page))
  		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
  	else
  		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
7ec99d621   Johannes Weiner   memcg: unify char...
3174
  	__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
e8589cc18   KAMEZAWA Hiroyuki   memcg: better mig...
3175
  	return ret;
ae41be374   KAMEZAWA Hiroyuki   bugfix for memory...
3176
  }
8869b8f6e   Hugh Dickins   memcg: memcontrol...
3177

69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
3178
  /* remove redundant charge if migration failed */
01b1ae63c   KAMEZAWA Hiroyuki   memcg: simple mig...
3179
  void mem_cgroup_end_migration(struct mem_cgroup *mem,
50de1dd96   Daisuke Nishimura   memcg: fix memory...
3180
  	struct page *oldpage, struct page *newpage, bool migration_ok)
ae41be374   KAMEZAWA Hiroyuki   bugfix for memory...
3181
  {
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3182
  	struct page *used, *unused;
01b1ae63c   KAMEZAWA Hiroyuki   memcg: simple mig...
3183
  	struct page_cgroup *pc;
01b1ae63c   KAMEZAWA Hiroyuki   memcg: simple mig...
3184
3185
3186
  
  	if (!mem)
  		return;
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3187
  	/* blocks rmdir() */
887032670   KAMEZAWA Hiroyuki   cgroup avoid perm...
3188
  	cgroup_exclude_rmdir(&mem->css);
50de1dd96   Daisuke Nishimura   memcg: fix memory...
3189
  	if (!migration_ok) {
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3190
3191
  		used = oldpage;
  		unused = newpage;
01b1ae63c   KAMEZAWA Hiroyuki   memcg: simple mig...
3192
  	} else {
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3193
  		used = newpage;
01b1ae63c   KAMEZAWA Hiroyuki   memcg: simple mig...
3194
3195
  		unused = oldpage;
  	}
69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
3196
  	/*
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3197
3198
3199
  	 * We disallowed uncharge of pages under migration because mapcount
  	 * of the page goes down to zero, temporarily.
  	 * Clear the flag and check whether the page should still be charged.
01b1ae63c   KAMEZAWA Hiroyuki   memcg: simple mig...
3200
  	 */
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3201
3202
3203
3204
  	pc = lookup_page_cgroup(oldpage);
  	lock_page_cgroup(pc);
  	ClearPageCgroupMigration(pc);
  	unlock_page_cgroup(pc);
01b1ae63c   KAMEZAWA Hiroyuki   memcg: simple mig...
3205

ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3206
  	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
01b1ae63c   KAMEZAWA Hiroyuki   memcg: simple mig...
3207
  	/*
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3208
3209
3210
3211
3212
3213
  	 * If the page is a file cache, the radix-tree replacement is atomic
  	 * and we can skip this check. When it was an Anon page, its mapcount
  	 * goes down to 0. But because we added the MIGRATION flag, it is not
  	 * uncharged yet. There are several cases, but the page->mapcount check
  	 * and the USED bit check in mem_cgroup_uncharge_page() are enough.
  	 * (See also the charge taken in mem_cgroup_prepare_migration().)
69029cd55   KAMEZAWA Hiroyuki   memcg: remove ref...
3214
  	 */
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3215
3216
  	if (PageAnon(used))
  		mem_cgroup_uncharge_page(used);
887032670   KAMEZAWA Hiroyuki   cgroup avoid perm...
3217
  	/*
ac39cf8cb   akpm@linux-foundation.org   memcg: fix mis-ac...
3218
3219
  	 * At migration, we may charge against a cgroup which has no
  	 * tasks.
887032670   KAMEZAWA Hiroyuki   cgroup avoid perm...
3220
3221
3222
3223
  	 * So, rmdir()->pre_destroy() can be called while we do this charge.
  	 * In that case, we need to call pre_destroy() again; check it here.
  	 */
  	cgroup_release_and_wakeup_rmdir(&mem->css);
ae41be374   KAMEZAWA Hiroyuki   bugfix for memory...
3224
  }
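  
  /*
   * Illustrative call sequence (a sketch, not part of the original file): the
   * page migration code in mm/migrate.c is expected to drive the two hooks
   * above roughly as follows:
   *
   *	ret = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
   *	if (!ret) {
   *		... unmap, copy and remap the page ...
   *		mem_cgroup_end_migration(mem, page, newpage, migration_ok);
   *	}
   *
   * prepare_migration() pre-charges the new page and marks the old one with
   * the MIGRATION flag; end_migration() drops whichever charge turned out to
   * be redundant.
   */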
78fb74669   Pavel Emelianov   Memory controller...
3225

f212ad7cf   Daisuke Nishimura   memcg: add memcg ...
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
  #ifdef CONFIG_DEBUG_VM
  static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
  {
  	struct page_cgroup *pc;
  
  	pc = lookup_page_cgroup(page);
  	if (likely(pc) && PageCgroupUsed(pc))
  		return pc;
  	return NULL;
  }
  
  bool mem_cgroup_bad_page_check(struct page *page)
  {
  	if (mem_cgroup_disabled())
  		return false;
  
  	return lookup_page_cgroup_used(page) != NULL;
  }
  
  void mem_cgroup_print_bad_page(struct page *page)
  {
  	struct page_cgroup *pc;
  
  	pc = lookup_page_cgroup_used(page);
  	if (pc) {
  		int ret = -1;
  		char *path;
  
  		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
  		       pc, pc->flags, pc->mem_cgroup);
  
  		path = kmalloc(PATH_MAX, GFP_KERNEL);
  		if (path) {
  			rcu_read_lock();
  			ret = cgroup_path(pc->mem_cgroup->css.cgroup,
  							path, PATH_MAX);
  			rcu_read_unlock();
  		}
  
  		printk(KERN_CONT "(%s)\n",
  				(ret < 0) ? "cannot get the path" : path);
  		kfree(path);
  	}
  }
  #endif
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3272
  static DEFINE_MUTEX(set_limit_mutex);
d38d2a758   KOSAKI Motohiro   mm: make mem_cgro...
3273
  static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3274
  				unsigned long long val)
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3275
  {
81d39c20f   KAMEZAWA Hiroyuki   memcg: fix shrink...
3276
  	int retry_count;
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
3277
  	u64 memswlimit, memlimit;
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3278
  	int ret = 0;
81d39c20f   KAMEZAWA Hiroyuki   memcg: fix shrink...
3279
3280
  	int children = mem_cgroup_count_children(memcg);
  	u64 curusage, oldusage;
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
3281
  	int enlarge;
81d39c20f   KAMEZAWA Hiroyuki   memcg: fix shrink...
3282
3283
3284
3285
3286
3287
3288
3289
3290
  
  	/*
  	 * For keeping hierarchical_reclaim simple, how long we should retry
  	 * depends on the caller. We set our retry count to be a function of
  	 * the number of children we should visit in this loop.
  	 */
  	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
  
  	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3291

3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
3292
  	enlarge = 0;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3293
  	while (retry_count) {
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3294
3295
3296
3297
  		if (signal_pending(current)) {
  			ret = -EINTR;
  			break;
  		}
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
  		/*
  		 * Rather than hiding all this in some function, do it open-coded
  		 * so that it is clear what really happens here.
  		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
  		 */
  		mutex_lock(&set_limit_mutex);
  		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  		if (memswlimit < val) {
  			ret = -EINVAL;
  			mutex_unlock(&set_limit_mutex);
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3308
3309
  			break;
  		}
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
3310
3311
3312
3313
  
  		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  		if (memlimit < val)
  			enlarge = 1;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3314
  		ret = res_counter_set_limit(&memcg->res, val);
22a668d7c   KAMEZAWA Hiroyuki   memcg: fix behavi...
3315
3316
3317
3318
3319
3320
  		if (!ret) {
  			if (memswlimit == val)
  				memcg->memsw_is_minimum = true;
  			else
  				memcg->memsw_is_minimum = false;
  		}
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3321
3322
3323
3324
  		mutex_unlock(&set_limit_mutex);
  
  		if (!ret)
  			break;
aa20d489c   Bob Liu   memcg: code clean...
3325
  		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
0ae5e89c6   Ying Han   memcg: count the ...
3326
3327
  						MEM_CGROUP_RECLAIM_SHRINK,
  						NULL);
81d39c20f   KAMEZAWA Hiroyuki   memcg: fix shrink...
3328
3329
3330
3331
3332
3333
  		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
  		/* Usage is reduced ? */
    		if (curusage >= oldusage)
  			retry_count--;
  		else
  			oldusage = curusage;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3334
  	}
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
3335
3336
  	if (!ret && enlarge)
  		memcg_oom_recover(memcg);
14797e236   KOSAKI Motohiro   memcg: add inacti...
3337

8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3338
3339
  	return ret;
  }
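  
  /*
   * Usage sketch (illustrative; the mount point is an assumption): writing to
   * the memory.limit_in_bytes control file ends up here via mem_cgroup_write():
   *
   *	# echo 256M > /sys/fs/cgroup/memory/mygroup/memory.limit_in_bytes
   *
   * The retry loop above then reclaims from the hierarchy until usage fits
   * under the new limit or the retry budget runs out.
   */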
338c84310   Li Zefan   memcg: remove som...
3340
3341
  static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
  					unsigned long long val)
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3342
  {
81d39c20f   KAMEZAWA Hiroyuki   memcg: fix shrink...
3343
  	int retry_count;
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
3344
  	u64 memlimit, memswlimit, oldusage, curusage;
81d39c20f   KAMEZAWA Hiroyuki   memcg: fix shrink...
3345
3346
  	int children = mem_cgroup_count_children(memcg);
  	int ret = -EBUSY;
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
3347
  	int enlarge = 0;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3348

81d39c20f   KAMEZAWA Hiroyuki   memcg: fix shrink...
3349
3350
3351
  	/* see mem_cgroup_resize_limit */
  	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
  	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
  	while (retry_count) {
  		if (signal_pending(current)) {
  			ret = -EINTR;
  			break;
  		}
  		/*
  		 * Rather than hiding all this in some function, do it open-coded
  		 * so that it is clear what really happens here.
  		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
  		 */
  		mutex_lock(&set_limit_mutex);
  		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  		if (memlimit > val) {
  			ret = -EINVAL;
  			mutex_unlock(&set_limit_mutex);
  			break;
  		}
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
3369
3370
3371
  		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  		if (memswlimit < val)
  			enlarge = 1;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3372
  		ret = res_counter_set_limit(&memcg->memsw, val);
22a668d7c   KAMEZAWA Hiroyuki   memcg: fix behavi...
3373
3374
3375
3376
3377
3378
  		if (!ret) {
  			if (memlimit == val)
  				memcg->memsw_is_minimum = true;
  			else
  				memcg->memsw_is_minimum = false;
  		}
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3379
3380
3381
3382
  		mutex_unlock(&set_limit_mutex);
  
  		if (!ret)
  			break;
4e4169535   Balbir Singh   memory controller...
3383
  		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
75822b449   Balbir Singh   memory controller...
3384
  						MEM_CGROUP_RECLAIM_NOSWAP |
0ae5e89c6   Ying Han   memcg: count the ...
3385
3386
  						MEM_CGROUP_RECLAIM_SHRINK,
  						NULL);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3387
  		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
81d39c20f   KAMEZAWA Hiroyuki   memcg: fix shrink...
3388
  		/* Usage is reduced ? */
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3389
  		if (curusage >= oldusage)
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3390
  			retry_count--;
81d39c20f   KAMEZAWA Hiroyuki   memcg: fix shrink...
3391
3392
  		else
  			oldusage = curusage;
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3393
  	}
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
3394
3395
  	if (!ret && enlarge)
  		memcg_oom_recover(memcg);
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3396
3397
  	return ret;
  }
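  
  /*
   * Usage sketch (illustrative; the mount point is an assumption): with swap
   * accounting enabled, memory.memsw.limit_in_bytes is resized here. The value
   * caps memory+swap and must not be smaller than memory.limit_in_bytes, per
   * the -EINVAL check above:
   *
   *	# echo 512M > /sys/fs/cgroup/memory/mygroup/memory.memsw.limit_in_bytes
   */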
4e4169535   Balbir Singh   memory controller...
3398
  unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
0ae5e89c6   Ying Han   memcg: count the ...
3399
3400
  					    gfp_t gfp_mask,
  					    unsigned long *total_scanned)
4e4169535   Balbir Singh   memory controller...
3401
3402
3403
3404
3405
3406
  {
  	unsigned long nr_reclaimed = 0;
  	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
  	unsigned long reclaimed;
  	int loop = 0;
  	struct mem_cgroup_tree_per_zone *mctz;
ef8745c1e   KAMEZAWA Hiroyuki   memcg: reduce che...
3407
  	unsigned long long excess;
0ae5e89c6   Ying Han   memcg: count the ...
3408
  	unsigned long nr_scanned;
4e4169535   Balbir Singh   memory controller...
3409
3410
3411
  
  	if (order > 0)
  		return 0;
00918b6ab   KOSAKI Motohiro   memcg: remove nid...
3412
  	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4e4169535   Balbir Singh   memory controller...
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
  	/*
  	 * This loop can run for a while, especially if mem_cgroups continuously
  	 * keep exceeding their soft limit and putting the system under
  	 * pressure.
  	 */
  	do {
  		if (next_mz)
  			mz = next_mz;
  		else
  			mz = mem_cgroup_largest_soft_limit_node(mctz);
  		if (!mz)
  			break;
0ae5e89c6   Ying Han   memcg: count the ...
3425
  		nr_scanned = 0;
4e4169535   Balbir Singh   memory controller...
3426
3427
  		reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
  						gfp_mask,
0ae5e89c6   Ying Han   memcg: count the ...
3428
3429
  						MEM_CGROUP_RECLAIM_SOFT,
  						&nr_scanned);
4e4169535   Balbir Singh   memory controller...
3430
  		nr_reclaimed += reclaimed;
0ae5e89c6   Ying Han   memcg: count the ...
3431
  		*total_scanned += nr_scanned;
4e4169535   Balbir Singh   memory controller...
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
  		spin_lock(&mctz->lock);
  
  		/*
  		 * If we failed to reclaim anything from this memory cgroup
  		 * it is time to move on to the next cgroup
  		 */
  		next_mz = NULL;
  		if (!reclaimed) {
  			do {
  				/*
  				 * Loop until we find yet another one.
  				 *
  				 * By the time we get the soft_limit lock
  				 * again, someone might have added the
  				 * group back on the RB tree. Iterate to
  				 * make sure we get a different mem.
  				 * mem_cgroup_largest_soft_limit_node returns
  				 * NULL if no other cgroup is present on
  				 * the tree
  				 */
  				next_mz =
  				__mem_cgroup_largest_soft_limit_node(mctz);
39cc98f1f   Michal Hocko   memcg: remove poi...
3454
  				if (next_mz == mz)
4e4169535   Balbir Singh   memory controller...
3455
  					css_put(&next_mz->mem->css);
39cc98f1f   Michal Hocko   memcg: remove poi...
3456
  				else /* next_mz == NULL or other memcg */
4e4169535   Balbir Singh   memory controller...
3457
3458
3459
  					break;
  			} while (1);
  		}
4e4169535   Balbir Singh   memory controller...
3460
  		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
ef8745c1e   KAMEZAWA Hiroyuki   memcg: reduce che...
3461
  		excess = res_counter_soft_limit_excess(&mz->mem->res);
4e4169535   Balbir Singh   memory controller...
3462
3463
3464
3465
3466
3467
3468
3469
  		/*
  		 * One school of thought says that we should not add
  		 * back the node to the tree if reclaim returns 0.
  		 * But our reclaim could return 0 simply because, due
  		 * to priority, we are exposing a smaller subset of
  		 * memory to reclaim from. Consider this as a longer
  		 * term TODO.
  		 */
ef8745c1e   KAMEZAWA Hiroyuki   memcg: reduce che...
3470
3471
  		/* If excess == 0, no tree ops */
  		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
4e4169535   Balbir Singh   memory controller...
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
  		spin_unlock(&mctz->lock);
  		css_put(&mz->mem->css);
  		loop++;
  		/*
  		 * Could not reclaim anything and there are no more
  		 * mem cgroups to try or we seem to be looping without
  		 * reclaiming anything.
  		 */
  		if (!nr_reclaimed &&
  			(next_mz == NULL ||
  			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
  			break;
  	} while (!nr_reclaimed);
  	if (next_mz)
  		css_put(&next_mz->mem->css);
  	return nr_reclaimed;
  }
c9b0ed514   KAMEZAWA Hiroyuki   memcg: helper fun...
3489
  /*
cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3490
   * This routine traverses the page_cgroups on the given list and drops them all.
cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3491
3492
   * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
   */
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3493
  static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3494
  				int node, int zid, enum lru_list lru)
cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3495
  {
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3496
3497
  	struct zone *zone;
  	struct mem_cgroup_per_zone *mz;
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3498
  	struct page_cgroup *pc, *busy;
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3499
  	unsigned long flags, loop;
072c56c13   KAMEZAWA Hiroyuki   per-zone and recl...
3500
  	struct list_head *list;
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3501
  	int ret = 0;
072c56c13   KAMEZAWA Hiroyuki   per-zone and recl...
3502

08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3503
3504
  	zone = &NODE_DATA(node)->node_zones[zid];
  	mz = mem_cgroup_zoneinfo(mem, node, zid);
b69408e88   Christoph Lameter   vmscan: Use an in...
3505
  	list = &mz->lists[lru];
cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3506

f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3507
3508
3509
3510
3511
  	loop = MEM_CGROUP_ZSTAT(mz, lru);
  	/* give some margin against EBUSY etc...*/
  	loop += 256;
  	busy = NULL;
  	while (loop--) {
5564e88ba   Johannes Weiner   memcg: condense p...
3512
  		struct page *page;
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3513
  		ret = 0;
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3514
  		spin_lock_irqsave(&zone->lru_lock, flags);
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3515
  		if (list_empty(list)) {
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3516
  			spin_unlock_irqrestore(&zone->lru_lock, flags);
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
3517
  			break;
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3518
3519
3520
3521
  		}
  		pc = list_entry(list->prev, struct page_cgroup, lru);
  		if (busy == pc) {
  			list_move(&pc->lru, list);
648bcc771   Thiago Farina   mm/memcontrol.c: ...
3522
  			busy = NULL;
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3523
  			spin_unlock_irqrestore(&zone->lru_lock, flags);
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3524
3525
  			continue;
  		}
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3526
  		spin_unlock_irqrestore(&zone->lru_lock, flags);
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3527

6b3ae58ef   Johannes Weiner   memcg: remove dir...
3528
  		page = lookup_cgroup_page(pc);
5564e88ba   Johannes Weiner   memcg: condense p...
3529
3530
  
  		ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3531
  		if (ret == -ENOMEM)
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
3532
  			break;
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3533
3534
3535
3536
3537
3538
3539
  
  		if (ret == -EBUSY || ret == -EINVAL) {
  			/* found lock contention or "pc" is obsolete. */
  			busy = pc;
  			cond_resched();
  		} else
  			busy = NULL;
cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3540
  	}
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3541

f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3542
3543
3544
  	if (!ret && !list_empty(list))
  		return -EBUSY;
  	return ret;
cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3545
3546
3547
3548
3549
3550
  }
  
  /*
   * Make the mem_cgroup's charge 0 if there is no task.
   * This makes it possible to delete this mem_cgroup.
   */
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3551
  static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3552
  {
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3553
3554
3555
  	int ret;
  	int node, zid, shrink;
  	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3556
  	struct cgroup *cgrp = mem->css.cgroup;
8869b8f6e   Hugh Dickins   memcg: memcontrol...
3557

cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3558
  	css_get(&mem->css);
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3559
3560
  
  	shrink = 0;
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3561
3562
3563
  	/* should free all ? */
  	if (free_all)
  		goto try_to_free;
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3564
  move_account:
fce664775   Daisuke Nishimura   memcg: ensure lis...
3565
  	do {
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3566
  		ret = -EBUSY;
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3567
3568
3569
3570
  		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
  			goto out;
  		ret = -EINTR;
  		if (signal_pending(current))
cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3571
  			goto out;
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
3572
3573
  		/* This is for making all *used* pages to be on LRU. */
  		lru_add_drain_all();
d38144b7a   Michal Hocko   memcg: unify sync...
3574
  		drain_all_stock_sync(mem);
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3575
  		ret = 0;
32047e2a8   KAMEZAWA Hiroyuki   memcg: avoid lock...
3576
  		mem_cgroup_start_move(mem);
299b4eaa3   KAMEZAWA Hiroyuki   memcg: NULL point...
3577
  		for_each_node_state(node, N_HIGH_MEMORY) {
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3578
  			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
b69408e88   Christoph Lameter   vmscan: Use an in...
3579
  				enum lru_list l;
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3580
3581
  				for_each_lru(l) {
  					ret = mem_cgroup_force_empty_list(mem,
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3582
  							node, zid, l);
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3583
3584
3585
  					if (ret)
  						break;
  				}
1ecaab2bd   KAMEZAWA Hiroyuki   per-zone and recl...
3586
  			}
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3587
3588
3589
  			if (ret)
  				break;
  		}
32047e2a8   KAMEZAWA Hiroyuki   memcg: avoid lock...
3590
  		mem_cgroup_end_move(mem);
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
3591
  		memcg_oom_recover(mem);
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3592
3593
3594
  		/* it seems parent cgroup doesn't have enough mem */
  		if (ret == -ENOMEM)
  			goto try_to_free;
52d4b9ac0   KAMEZAWA Hiroyuki   memcg: allocate a...
3595
  		cond_resched();
fce664775   Daisuke Nishimura   memcg: ensure lis...
3596
3597
  	/* "ret" should also be checked to ensure all lists are empty. */
  	} while (mem->res.usage > 0 || ret);
cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3598
3599
3600
  out:
  	css_put(&mem->css);
  	return ret;
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3601
3602
  
  try_to_free:
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3603
3604
  	/* returns EBUSY if there is a task or if we come here twice. */
  	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3605
3606
3607
  		ret = -EBUSY;
  		goto out;
  	}
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3608
3609
  	/* we call try-to-free pages to make this cgroup empty */
  	lru_add_drain_all();
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3610
3611
3612
3613
  	/* try to free all pages in this cgroup */
  	shrink = 1;
  	while (nr_retries && mem->res.usage > 0) {
  		int progress;
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3614
3615
3616
3617
3618
  
  		if (signal_pending(current)) {
  			ret = -EINTR;
  			goto out;
  		}
a7885eb8a   KOSAKI Motohiro   memcg: swappiness
3619
  		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
185efc0f9   Johannes Weiner   memcg: Revert "me...
3620
  						false);
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3621
  		if (!progress) {
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3622
  			nr_retries--;
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3623
  			/* maybe some writeback is necessary */
8aa7e847d   Jens Axboe   Fix congestion_wa...
3624
  			congestion_wait(BLK_RW_ASYNC, HZ/10);
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3625
  		}
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3626
3627
  
  	}
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
3628
  	lru_add_drain();
f817ed485   KAMEZAWA Hiroyuki   memcg: move all a...
3629
  	/* try move_account...there may be some *locked* pages. */
fce664775   Daisuke Nishimura   memcg: ensure lis...
3630
  	goto move_account;
cc8475822   KAMEZAWA Hiroyuki   memory cgroup enh...
3631
  }
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
3632
3633
3634
3635
  int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
  {
  	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
  }
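  
  /*
   * Usage sketch (illustrative; the mount point is an assumption): the
   * memory.force_empty file is wired to the handler above. Writing any value
   * asks the kernel to reclaim the group's pages, falling back to moving the
   * remaining charges to the parent, so an idle cgroup can then be removed:
   *
   *	# echo 0 > /sys/fs/cgroup/memory/mygroup/memory.force_empty
   *	# rmdir /sys/fs/cgroup/memory/mygroup
   */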
18f59ea7d   Balbir Singh   memcg: memory cgr...
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
  static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
  {
  	return mem_cgroup_from_cont(cont)->use_hierarchy;
  }
  
  static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
  					u64 val)
  {
  	int retval = 0;
  	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
  	struct cgroup *parent = cont->parent;
  	struct mem_cgroup *parent_mem = NULL;
  
  	if (parent)
  		parent_mem = mem_cgroup_from_cont(parent);
  
  	cgroup_lock();
  	/*
af901ca18   André Goddard Rosa   tree-wide: fix as...
3654
  	 * If parent's use_hierarchy is set, we can't make any modifications
18f59ea7d   Balbir Singh   memcg: memory cgr...
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
  	 * in the child subtrees. If it is unset, then the change can
  	 * occur, provided the current cgroup has no children.
  	 *
  	 * For the root cgroup, parent_mem is NULL; we allow the value to be
  	 * set if there are no children.
  	 */
  	if ((!parent_mem || !parent_mem->use_hierarchy) &&
  				(val == 1 || val == 0)) {
  		if (list_empty(&cont->children))
  			mem->use_hierarchy = val;
  		else
  			retval = -EBUSY;
  	} else
  		retval = -EINVAL;
  	cgroup_unlock();
  
  	return retval;
  }
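  
  /*
   * Usage sketch (illustrative; the mount point is an assumption):
   * memory.use_hierarchy is backed by the two handlers above. It can only be
   * flipped while the cgroup has no children and the parent does not already
   * force hierarchy:
   *
   *	# echo 1 > /sys/fs/cgroup/memory/parent/memory.use_hierarchy
   *	# mkdir /sys/fs/cgroup/memory/parent/child
   *
   * Charges in the child are then also counted against the parent's limit.
   */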
0c3e73e84   Balbir Singh   memcg: improve re...
3673

7a159cc9d   Johannes Weiner   memcg: use native...
3674
3675
  static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem,
  					       enum mem_cgroup_stat_index idx)
0c3e73e84   Balbir Singh   memcg: improve re...
3676
  {
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
3677
  	struct mem_cgroup *iter;
7a159cc9d   Johannes Weiner   memcg: use native...
3678
  	long val = 0;
0c3e73e84   Balbir Singh   memcg: improve re...
3679

7a159cc9d   Johannes Weiner   memcg: use native...
3680
  	/* Per-cpu values can be negative, use a signed accumulator */
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
3681
3682
3683
3684
3685
3686
  	for_each_mem_cgroup_tree(iter, mem)
  		val += mem_cgroup_read_stat(iter, idx);
  
  	if (val < 0) /* race ? */
  		val = 0;
  	return val;
0c3e73e84   Balbir Singh   memcg: improve re...
3687
  }
104f39284   Kirill A. Shutemov   memcg: extract me...
3688
3689
  static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
  {
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
3690
  	u64 val;
104f39284   Kirill A. Shutemov   memcg: extract me...
3691
3692
3693
3694
3695
3696
3697
  
  	if (!mem_cgroup_is_root(mem)) {
  		if (!swap)
  			return res_counter_read_u64(&mem->res, RES_USAGE);
  		else
  			return res_counter_read_u64(&mem->memsw, RES_USAGE);
  	}
7a159cc9d   Johannes Weiner   memcg: use native...
3698
3699
  	val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE);
  	val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS);
104f39284   Kirill A. Shutemov   memcg: extract me...
3700

7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
3701
  	if (swap)
7a159cc9d   Johannes Weiner   memcg: use native...
3702
  		val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
104f39284   Kirill A. Shutemov   memcg: extract me...
3703
3704
3705
  
  	return val << PAGE_SHIFT;
  }
2c3daa722   Paul Menage   CGroup API files:...
3706
  static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
8cdea7c05   Balbir Singh   Memory controller...
3707
  {
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3708
  	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
104f39284   Kirill A. Shutemov   memcg: extract me...
3709
  	u64 val;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3710
3711
3712
3713
3714
3715
  	int type, name;
  
  	type = MEMFILE_TYPE(cft->private);
  	name = MEMFILE_ATTR(cft->private);
  	switch (type) {
  	case _MEM:
104f39284   Kirill A. Shutemov   memcg: extract me...
3716
3717
3718
  		if (name == RES_USAGE)
  			val = mem_cgroup_usage(mem, false);
  		else
0c3e73e84   Balbir Singh   memcg: improve re...
3719
  			val = res_counter_read_u64(&mem->res, name);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3720
3721
  		break;
  	case _MEMSWAP:
104f39284   Kirill A. Shutemov   memcg: extract me...
3722
3723
3724
  		if (name == RES_USAGE)
  			val = mem_cgroup_usage(mem, true);
  		else
0c3e73e84   Balbir Singh   memcg: improve re...
3725
  			val = res_counter_read_u64(&mem->memsw, name);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3726
3727
3728
3729
3730
3731
  		break;
  	default:
  		BUG();
  		break;
  	}
  	return val;
8cdea7c05   Balbir Singh   Memory controller...
3732
  }
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3733
3734
3735
3736
  /*
   * The users of this function are the write handlers for RES_LIMIT and
   * RES_SOFT_LIMIT.
   */
856c13aa1   Paul Menage   cgroup files: con...
3737
3738
  static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
  			    const char *buffer)
8cdea7c05   Balbir Singh   Memory controller...
3739
  {
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3740
  	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3741
  	int type, name;
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3742
3743
  	unsigned long long val;
  	int ret;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3744
3745
3746
  	type = MEMFILE_TYPE(cft->private);
  	name = MEMFILE_ATTR(cft->private);
  	switch (name) {
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3747
  	case RES_LIMIT:
4b3bde4c9   Balbir Singh   memcg: remove the...
3748
3749
3750
3751
  		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
  			ret = -EINVAL;
  			break;
  		}
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3752
3753
  		/* This function does all necessary parse...reuse it */
  		ret = res_counter_memparse_write_strategy(buffer, &val);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3754
3755
3756
  		if (ret)
  			break;
  		if (type == _MEM)
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3757
  			ret = mem_cgroup_resize_limit(memcg, val);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3758
3759
  		else
  			ret = mem_cgroup_resize_memsw_limit(memcg, val);
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3760
  		break;
296c81d89   Balbir Singh   memory controller...
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
  	case RES_SOFT_LIMIT:
  		ret = res_counter_memparse_write_strategy(buffer, &val);
  		if (ret)
  			break;
  		/*
  		 * For memsw, soft limits are hard to implement in terms
  		 * of semantics, for now, we support soft limits for
  		 * control without swap
  		 */
  		if (type == _MEM)
  			ret = res_counter_set_soft_limit(&memcg->res, val);
  		else
  			ret = -EINVAL;
  		break;
628f42355   KAMEZAWA Hiroyuki   memcg: limit chan...
3775
3776
3777
3778
3779
  	default:
  		ret = -EINVAL; /* should be BUG() ? */
  		break;
  	}
  	return ret;
8cdea7c05   Balbir Singh   Memory controller...
3780
  }
fee7b548e   KAMEZAWA Hiroyuki   memcg: show real ...
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
  static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
  		unsigned long long *mem_limit, unsigned long long *memsw_limit)
  {
  	struct cgroup *cgroup;
  	unsigned long long min_limit, min_memsw_limit, tmp;
  
  	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
  	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  	cgroup = memcg->css.cgroup;
  	if (!memcg->use_hierarchy)
  		goto out;
  
  	while (cgroup->parent) {
  		cgroup = cgroup->parent;
  		memcg = mem_cgroup_from_cont(cgroup);
  		if (!memcg->use_hierarchy)
  			break;
  		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
  		min_limit = min(min_limit, tmp);
  		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
  		min_memsw_limit = min(min_memsw_limit, tmp);
  	}
  out:
  	*mem_limit = min_limit;
  	*memsw_limit = min_memsw_limit;
  	return;
  }
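  
  /*
   * Worked example (illustrative values): with use_hierarchy enabled and
   * limits a = 1G, a/b = 512M and a/b/c = 2G on memory.limit_in_bytes, the
   * walk above reports 512M as the hierarchical_memory_limit of a/b/c, i.e.
   * the minimum limit along the path of ancestors that use hierarchy.
   */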
29f2a4dac   Pavel Emelyanov   memcgroup: implem...
3808
  static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
c84872e16   Pavel Emelyanov   memcgroup: add th...
3809
3810
  {
  	struct mem_cgroup *mem;
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3811
  	int type, name;
c84872e16   Pavel Emelyanov   memcgroup: add th...
3812
3813
  
  	mem = mem_cgroup_from_cont(cont);
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3814
3815
3816
  	type = MEMFILE_TYPE(event);
  	name = MEMFILE_ATTR(event);
  	switch (name) {
29f2a4dac   Pavel Emelyanov   memcgroup: implem...
3817
  	case RES_MAX_USAGE:
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3818
3819
3820
3821
  		if (type == _MEM)
  			res_counter_reset_max(&mem->res);
  		else
  			res_counter_reset_max(&mem->memsw);
29f2a4dac   Pavel Emelyanov   memcgroup: implem...
3822
3823
  		break;
  	case RES_FAILCNT:
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
3824
3825
3826
3827
  		if (type == _MEM)
  			res_counter_reset_failcnt(&mem->res);
  		else
  			res_counter_reset_failcnt(&mem->memsw);
29f2a4dac   Pavel Emelyanov   memcgroup: implem...
3828
3829
  		break;
  	}
f64c3f549   Balbir Singh   memory controller...
3830

85cc59db1   Pavel Emelyanov   memcgroup: use tr...
3831
  	return 0;
c84872e16   Pavel Emelyanov   memcgroup: add th...
3832
  }
7dc74be03   Daisuke Nishimura   memcg: add interf...
3833
3834
3835
3836
3837
  static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
  					struct cftype *cft)
  {
  	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
  }
024914477   Daisuke Nishimura   memcg: move charg...
3838
  #ifdef CONFIG_MMU
7dc74be03   Daisuke Nishimura   memcg: add interf...
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
  static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
  					struct cftype *cft, u64 val)
  {
  	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
  
  	if (val >= (1 << NR_MOVE_TYPE))
  		return -EINVAL;
  	/*
  	 * We check this value several times in both can_attach() and
  	 * attach(), so we need cgroup lock to prevent this value from being
  	 * inconsistent.
  	 */
  	cgroup_lock();
  	mem->move_charge_at_immigrate = val;
  	cgroup_unlock();
  
  	return 0;
  }
024914477   Daisuke Nishimura   memcg: move charg...
3857
3858
3859
3860
3861
3862
3863
  #else
  static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
  					struct cftype *cft, u64 val)
  {
  	return -ENOSYS;
  }
  #endif
7dc74be03   Daisuke Nishimura   memcg: add interf...
3864

14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3865
3866
3867
3868
3869
  
  /* For read statistics */
  enum {
  	MCS_CACHE,
  	MCS_RSS,
d8046582d   KAMEZAWA Hiroyuki   memcg: make memcg...
3870
  	MCS_FILE_MAPPED,
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3871
3872
  	MCS_PGPGIN,
  	MCS_PGPGOUT,
1dd3a2732   Daisuke Nishimura   memcg: show swap ...
3873
  	MCS_SWAP,
456f998ec   Ying Han   memcg: add the pa...
3874
3875
  	MCS_PGFAULT,
  	MCS_PGMAJFAULT,
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
  	MCS_INACTIVE_ANON,
  	MCS_ACTIVE_ANON,
  	MCS_INACTIVE_FILE,
  	MCS_ACTIVE_FILE,
  	MCS_UNEVICTABLE,
  	NR_MCS_STAT,
  };
  
  struct mcs_total_stat {
  	s64 stat[NR_MCS_STAT];
d2ceb9b7d   KAMEZAWA Hiroyuki   memory cgroup enh...
3886
  };
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3887
3888
3889
3890
3891
3892
  struct {
  	char *local_name;
  	char *total_name;
  } memcg_stat_strings[NR_MCS_STAT] = {
  	{"cache", "total_cache"},
  	{"rss", "total_rss"},
d69b042f3   Balbir Singh   memcg: add file-b...
3893
  	{"mapped_file", "total_mapped_file"},
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3894
3895
  	{"pgpgin", "total_pgpgin"},
  	{"pgpgout", "total_pgpgout"},
1dd3a2732   Daisuke Nishimura   memcg: show swap ...
3896
  	{"swap", "total_swap"},
456f998ec   Ying Han   memcg: add the pa...
3897
3898
  	{"pgfault", "total_pgfault"},
  	{"pgmajfault", "total_pgmajfault"},
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3899
3900
3901
3902
3903
3904
  	{"inactive_anon", "total_inactive_anon"},
  	{"active_anon", "total_active_anon"},
  	{"inactive_file", "total_inactive_file"},
  	{"active_file", "total_active_file"},
  	{"unevictable", "total_unevictable"}
  };
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
3905
3906
  static void
  mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3907
  {
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3908
3909
3910
  	s64 val;
  
  	/* per cpu stat */
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
3911
  	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3912
  	s->stat[MCS_CACHE] += val * PAGE_SIZE;
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
3913
  	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3914
  	s->stat[MCS_RSS] += val * PAGE_SIZE;
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
3915
  	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
d8046582d   KAMEZAWA Hiroyuki   memcg: make memcg...
3916
  	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
e9f8974f2   Johannes Weiner   memcg: break out ...
3917
  	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3918
  	s->stat[MCS_PGPGIN] += val;
e9f8974f2   Johannes Weiner   memcg: break out ...
3919
  	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3920
  	s->stat[MCS_PGPGOUT] += val;
1dd3a2732   Daisuke Nishimura   memcg: show swap ...
3921
  	if (do_swap_account) {
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
3922
  		val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
1dd3a2732   Daisuke Nishimura   memcg: show swap ...
3923
3924
  		s->stat[MCS_SWAP] += val * PAGE_SIZE;
  	}
456f998ec   Ying Han   memcg: add the pa...
3925
3926
3927
3928
  	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
  	s->stat[MCS_PGFAULT] += val;
  	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
  	s->stat[MCS_PGMAJFAULT] += val;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3929
3930
  
  	/* per zone stat */
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3931
  	val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_ANON));
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3932
  	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3933
  	val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_ANON));
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3934
  	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3935
  	val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_FILE));
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3936
  	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3937
  	val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_FILE));
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3938
  	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3939
  	val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_UNEVICTABLE));
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3940
  	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3941
3942
3943
3944
3945
  }
  
  static void
  mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
  {
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
3946
3947
3948
3949
  	struct mem_cgroup *iter;
  
  	for_each_mem_cgroup_tree(iter, mem)
  		mem_cgroup_get_local_stat(iter, s);
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
3950
  }
406eb0c9b   Ying Han   memcg: add memory...
3951
3952
3953
3954
3955
3956
3957
3958
  #ifdef CONFIG_NUMA
  static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
  {
  	int nid;
  	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
  	unsigned long node_nr;
  	struct cgroup *cont = m->private;
  	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3959
  	total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
406eb0c9b   Ying Han   memcg: add memory...
3960
3961
  	seq_printf(m, "total=%lu", total_nr);
  	for_each_node_state(nid, N_HIGH_MEMORY) {
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3962
  		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
406eb0c9b   Ying Han   memcg: add memory...
3963
3964
3965
3966
  		seq_printf(m, " N%d=%lu", nid, node_nr);
  	}
  	seq_putc(m, '\n');
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3967
  	file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
406eb0c9b   Ying Han   memcg: add memory...
3968
3969
  	seq_printf(m, "file=%lu", file_nr);
  	for_each_node_state(nid, N_HIGH_MEMORY) {
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3970
3971
  		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
  				LRU_ALL_FILE);
406eb0c9b   Ying Han   memcg: add memory...
3972
3973
3974
3975
  		seq_printf(m, " N%d=%lu", nid, node_nr);
  	}
  	seq_putc(m, '\n');
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3976
  	anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
406eb0c9b   Ying Han   memcg: add memory...
3977
3978
  	seq_printf(m, "anon=%lu", anon_nr);
  	for_each_node_state(nid, N_HIGH_MEMORY) {
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3979
3980
  		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
  				LRU_ALL_ANON);
406eb0c9b   Ying Han   memcg: add memory...
3981
3982
3983
3984
  		seq_printf(m, " N%d=%lu", nid, node_nr);
  	}
  	seq_putc(m, '\n');
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3985
  	unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
406eb0c9b   Ying Han   memcg: add memory...
3986
3987
  	seq_printf(m, "unevictable=%lu", unevictable_nr);
  	for_each_node_state(nid, N_HIGH_MEMORY) {
bb2a0de92   KAMEZAWA Hiroyuki   memcg: consolidat...
3988
3989
  		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
  				BIT(LRU_UNEVICTABLE));
406eb0c9b   Ying Han   memcg: add memory...
3990
3991
3992
3993
3994
3995
3996
  		seq_printf(m, " N%d=%lu", nid, node_nr);
  	}
  	seq_putc(m, '\n');
  	return 0;
  }
  #endif /* CONFIG_NUMA */
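  
  /*
   * Example output (illustrative values) of memory.numa_stat as produced by
   * mem_control_numa_stat_show() on a two-node machine:
   *
   *	total=1234 N0=800 N1=434
   *	file=600 N0=400 N1=200
   *	anon=620 N0=390 N1=230
   *	unevictable=14 N0=10 N1=4
   *
   * Counts are in pages, broken down per NUMA node.
   */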
c64745cf0   Paul Menage   CGroup API files:...
3997
3998
  static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
  				 struct cgroup_map_cb *cb)
d2ceb9b7d   KAMEZAWA Hiroyuki   memory cgroup enh...
3999
  {
d2ceb9b7d   KAMEZAWA Hiroyuki   memory cgroup enh...
4000
  	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
4001
  	struct mcs_total_stat mystat;
d2ceb9b7d   KAMEZAWA Hiroyuki   memory cgroup enh...
4002
  	int i;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
4003
4004
  	memset(&mystat, 0, sizeof(mystat));
  	mem_cgroup_get_local_stat(mem_cont, &mystat);
d2ceb9b7d   KAMEZAWA Hiroyuki   memory cgroup enh...
4005

406eb0c9b   Ying Han   memcg: add memory...
4006

1dd3a2732   Daisuke Nishimura   memcg: show swap ...
4007
4008
4009
  	for (i = 0; i < NR_MCS_STAT; i++) {
  		if (i == MCS_SWAP && !do_swap_account)
  			continue;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
4010
  		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
1dd3a2732   Daisuke Nishimura   memcg: show swap ...
4011
  	}
7b854121e   Lee Schermerhorn   Unevictable LRU P...
4012

14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
4013
  	/* Hierarchical information */
fee7b548e   KAMEZAWA Hiroyuki   memcg: show real ...
4014
4015
4016
4017
4018
4019
4020
  	{
  		unsigned long long limit, memsw_limit;
  		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
  		cb->fill(cb, "hierarchical_memory_limit", limit);
  		if (do_swap_account)
  			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
  	}
7f016ee8b   KOSAKI Motohiro   memcg: show recla...
4021

14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
4022
4023
  	memset(&mystat, 0, sizeof(mystat));
  	mem_cgroup_get_total_stat(mem_cont, &mystat);
1dd3a2732   Daisuke Nishimura   memcg: show swap ...
4024
4025
4026
  	for (i = 0; i < NR_MCS_STAT; i++) {
  		if (i == MCS_SWAP && !do_swap_account)
  			continue;
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
4027
  		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
1dd3a2732   Daisuke Nishimura   memcg: show swap ...
4028
  	}
14067bb3e   KAMEZAWA Hiroyuki   memcg: hierarchic...
4029

7f016ee8b   KOSAKI Motohiro   memcg: show recla...
4030
  #ifdef CONFIG_DEBUG_VM
c772be939   KOSAKI Motohiro   memcg: fix calcul...
4031
  	cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
7f016ee8b   KOSAKI Motohiro   memcg: show recla...
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
  
  	{
  		int nid, zid;
  		struct mem_cgroup_per_zone *mz;
  		unsigned long recent_rotated[2] = {0, 0};
  		unsigned long recent_scanned[2] = {0, 0};
  
  		for_each_online_node(nid)
  			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
  
  				recent_rotated[0] +=
  					mz->reclaim_stat.recent_rotated[0];
  				recent_rotated[1] +=
  					mz->reclaim_stat.recent_rotated[1];
  				recent_scanned[0] +=
  					mz->reclaim_stat.recent_scanned[0];
  				recent_scanned[1] +=
  					mz->reclaim_stat.recent_scanned[1];
  			}
  		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
  		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
  		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
  		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
  	}
  #endif
d2ceb9b7d   KAMEZAWA Hiroyuki   memory cgroup enh...
4058
4059
  	return 0;
  }
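  
  /*
   * Example output (illustrative values) of memory.stat as assembled by
   * mem_control_stat_show(): local counters first, then the hierarchical
   * limits, then the total_* counters summed over the subtree:
   *
   *	cache 1048576
   *	rss 2097152
   *	mapped_file 131072
   *	pgpgin 1024
   *	pgpgout 256
   *	...
   *	hierarchical_memory_limit 268435456
   *	total_cache 4194304
   *	total_rss 8388608
   *	...
   */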
a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4060
4061
4062
  static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
  {
  	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
1f4c025b5   KAMEZAWA Hiroyuki   memcg: export mem...
4063
  	return mem_cgroup_swappiness(memcg);
a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4064
4065
4066
4067
4068
4069
4070
  }
  
  static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
  				       u64 val)
  {
  	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  	struct mem_cgroup *parent;
068b38c1f   Li Zefan   memcg: fix a race...
4071

a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4072
4073
4074
4075
4076
4077
4078
  	if (val > 100)
  		return -EINVAL;
  
  	if (cgrp->parent == NULL)
  		return -EINVAL;
  
  	parent = mem_cgroup_from_cont(cgrp->parent);
068b38c1f   Li Zefan   memcg: fix a race...
4079
4080
  
  	cgroup_lock();
a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4081
4082
  	/* If under hierarchy, only empty-root can set this value */
  	if ((parent->use_hierarchy) ||
068b38c1f   Li Zefan   memcg: fix a race...
4083
4084
  	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
  		cgroup_unlock();
a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4085
  		return -EINVAL;
068b38c1f   Li Zefan   memcg: fix a race...
4086
  	}
a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4087

a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4088
  	memcg->swappiness = val;
a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4089

068b38c1f   Li Zefan   memcg: fix a race...
4090
  	cgroup_unlock();
a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4091
4092
  	return 0;
  }
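  
  /*
   * Usage sketch (illustrative; the mount point is an assumption):
   * memory.swappiness accepts 0-100 and, per the checks above, can only be
   * changed on a cgroup that is not part of a populated hierarchy:
   *
   *	# echo 10 > /sys/fs/cgroup/memory/mygroup/memory.swappiness
   */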
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4093
4094
4095
4096
4097
4098
4099
4100
  static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
  {
  	struct mem_cgroup_threshold_ary *t;
  	u64 usage;
  	int i;
  
  	rcu_read_lock();
  	if (!swap)
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4101
  		t = rcu_dereference(memcg->thresholds.primary);
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4102
  	else
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4103
  		t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
  
  	if (!t)
  		goto unlock;
  
  	usage = mem_cgroup_usage(memcg, swap);
  
  	/*
  	 * current_threshold points to the threshold just below usage.
  	 * If that is not the case, a threshold was crossed after the last
  	 * call of __mem_cgroup_threshold().
  	 */
5407a5625   Phil Carmody   mm: remove unnece...
4115
  	i = t->current_threshold;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
  
  	/*
  	 * Iterate backward over array of thresholds starting from
  	 * current_threshold and check if a threshold is crossed.
  	 * If none of thresholds below usage is crossed, we read
  	 * only one element of the array here.
  	 */
  	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
  		eventfd_signal(t->entries[i].eventfd, 1);
  
  	/* i = current_threshold + 1 */
  	i++;
  
  	/*
  	 * Iterate forward over array of thresholds starting from
  	 * current_threshold+1 and check if a threshold is crossed.
  	 * If none of thresholds above usage is crossed, we read
  	 * only one element of the array here.
  	 */
  	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
  		eventfd_signal(t->entries[i].eventfd, 1);
  
  	/* Update current_threshold */
5407a5625   Phil Carmody   mm: remove unnece...
4139
  	t->current_threshold = i - 1;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4140
4141
4142
4143
4144
4145
  unlock:
  	rcu_read_unlock();
  }
  
  static void mem_cgroup_threshold(struct mem_cgroup *memcg)
  {
ad4ca5f4b   Kirill A. Shutemov   memcg: fix thresh...
4146
4147
4148
4149
4150
4151
4152
  	while (memcg) {
  		__mem_cgroup_threshold(memcg, false);
  		if (do_swap_account)
  			__mem_cgroup_threshold(memcg, true);
  
  		memcg = parent_mem_cgroup(memcg);
  	}
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4153
4154
4155
4156
4157
4158
4159
4160
4161
  }
  
  static int compare_thresholds(const void *a, const void *b)
  {
  	const struct mem_cgroup_threshold *_a = a;
  	const struct mem_cgroup_threshold *_b = b;
  
  	return _a->threshold - _b->threshold;
  }
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
4162
  static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
  {
  	struct mem_cgroup_eventfd_list *ev;
  
  	list_for_each_entry(ev, &mem->oom_notify, list)
  		eventfd_signal(ev->eventfd, 1);
  	return 0;
  }
  
  static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
  {
7d74b06f2   KAMEZAWA Hiroyuki   memcg: use for_ea...
4173
4174
4175
4176
  	struct mem_cgroup *iter;
  
  	for_each_mem_cgroup_tree(iter, mem)
  		mem_cgroup_oom_notify_cb(iter);
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4177
4178
4179
4180
  }
  
  static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
  	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4181
4182
  {
  	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4183
4184
  	struct mem_cgroup_thresholds *thresholds;
  	struct mem_cgroup_threshold_ary *new;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4185
4186
  	int type = MEMFILE_TYPE(cft->private);
  	u64 threshold, usage;
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4187
  	int i, size, ret;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4188
4189
4190
4191
4192
4193
  
  	ret = res_counter_memparse_write_strategy(args, &threshold);
  	if (ret)
  		return ret;
  
  	mutex_lock(&memcg->thresholds_lock);
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4194

2e72b6347   Kirill A. Shutemov   memcg: implement ...
4195
  	if (type == _MEM)
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4196
  		thresholds = &memcg->thresholds;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4197
  	else if (type == _MEMSWAP)
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4198
  		thresholds = &memcg->memsw_thresholds;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4199
4200
4201
4202
4203
4204
  	else
  		BUG();
  
  	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
  
  	/* Check if a threshold crossed before adding a new one */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4205
  	if (thresholds->primary)
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4206
  		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4207
  	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4208
4209
  
  	/* Allocate memory for new array of thresholds */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4210
  	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4211
  			GFP_KERNEL);
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4212
  	if (!new) {
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4213
4214
4215
  		ret = -ENOMEM;
  		goto unlock;
  	}
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4216
  	new->size = size;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4217
4218
  
  	/* Copy thresholds (if any) to new array */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4219
4220
  	if (thresholds->primary) {
  		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4221
  				sizeof(struct mem_cgroup_threshold));
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4222
  	}
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4223
  	/* Add new threshold */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4224
4225
  	new->entries[size - 1].eventfd = eventfd;
  	new->entries[size - 1].threshold = threshold;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4226
4227
  
  	/* Sort thresholds. Registering a new threshold isn't time-critical */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4228
  	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4229
4230
4231
  			compare_thresholds, NULL);
  
  	/* Find current threshold */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4232
  	new->current_threshold = -1;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4233
  	for (i = 0; i < size; i++) {
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4234
  		if (new->entries[i].threshold < usage) {
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4235
  			/*
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4236
4237
  			 * new->current_threshold will not be used until
  			 * rcu_assign_pointer(), so it's safe to increment
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4238
4239
  			 * it here.
  			 */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4240
  			++new->current_threshold;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4241
4242
  		}
  	}
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4243
4244
4245
4246
4247
  	/* Free old spare buffer and save old primary buffer as spare */
  	kfree(thresholds->spare);
  	thresholds->spare = thresholds->primary;
  
  	rcu_assign_pointer(thresholds->primary, new);
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4248

907860ed3   Kirill A. Shutemov   cgroups: make cft...
4249
  	/* To be sure that nobody uses thresholds */
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4250
  	synchronize_rcu();
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4251
4252
4253
4254
4255
  unlock:
  	mutex_unlock(&memcg->thresholds_lock);
  
  	return ret;
  }
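  /*
   * From userspace, a threshold is armed through the cgroup v1 file
   * cgroup.event_control: writing "<eventfd> <fd of memory.usage_in_bytes>
   * <threshold in bytes>" ends up in the handler above, and the eventfd is
   * signalled whenever usage crosses that threshold in either direction.
   * A minimal, hedged userspace sketch; the mount point and the group name
   * "grp" below are assumptions, not taken from this file:
   */
  #include <sys/eventfd.h>
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  
  int main(void)
  {
  	int efd = eventfd(0, 0);
  	int ufd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes", O_RDONLY);
  	int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control", O_WRONLY);
  	char buf[64];
  	uint64_t ticks;
  
  	if (efd < 0 || ufd < 0 || cfd < 0)
  		return 1;
  	/* arm a 64M threshold against memory.usage_in_bytes */
  	snprintf(buf, sizeof(buf), "%d %d %llu", efd, ufd, 64ULL << 20);
  	if (write(cfd, buf, strlen(buf)) < 0)
  		return 1;
  	/* read() blocks until the threshold is crossed */
  	if (read(efd, &ticks, sizeof(ticks)) == sizeof(ticks))
  		printf("threshold crossed %llu time(s)\n",
  		       (unsigned long long)ticks);
  	return 0;
  }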
907860ed3   Kirill A. Shutemov   cgroups: make cft...
4256
  static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4257
  	struct cftype *cft, struct eventfd_ctx *eventfd)
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4258
4259
  {
  	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4260
4261
  	struct mem_cgroup_thresholds *thresholds;
  	struct mem_cgroup_threshold_ary *new;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4262
4263
  	int type = MEMFILE_TYPE(cft->private);
  	u64 usage;
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4264
  	int i, j, size;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4265
4266
4267
  
  	mutex_lock(&memcg->thresholds_lock);
  	if (type == _MEM)
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4268
  		thresholds = &memcg->thresholds;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4269
  	else if (type == _MEMSWAP)
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4270
  		thresholds = &memcg->memsw_thresholds;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
  	else
  		BUG();
  
  	/*
  	 * Something went wrong if we are trying to unregister a threshold
  	 * when we don't have any thresholds
  	 */
  	BUG_ON(!thresholds);
  
  	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
  
  	/* Check if a threshold was crossed before removing */
  	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
  
  	/* Calculate the new number of thresholds */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4286
4287
4288
  	size = 0;
  	for (i = 0; i < thresholds->primary->size; i++) {
  		if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4289
4290
  			size++;
  	}
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4291
  	new = thresholds->spare;
907860ed3   Kirill A. Shutemov   cgroups: make cft...
4292

2e72b6347   Kirill A. Shutemov   memcg: implement ...
4293
4294
  	/* Set thresholds array to NULL if we don't have thresholds */
  	if (!size) {
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4295
4296
  		kfree(new);
  		new = NULL;
907860ed3   Kirill A. Shutemov   cgroups: make cft...
4297
  		goto swap_buffers;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4298
  	}
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4299
  	new->size = size;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4300
4301
  
  	/* Copy thresholds and find current threshold */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4302
4303
4304
  	new->current_threshold = -1;
  	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
  		if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4305
  			continue;
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4306
4307
  		new->entries[j] = thresholds->primary->entries[i];
  		if (new->entries[j].threshold < usage) {
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4308
  			/*
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4309
  			 * new->current_threshold will not be used
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4310
4311
4312
  			 * until rcu_assign_pointer(), so it's safe to increment
  			 * it here.
  			 */
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4313
  			++new->current_threshold;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4314
4315
4316
  		}
  		j++;
  	}
907860ed3   Kirill A. Shutemov   cgroups: make cft...
4317
  swap_buffers:
2c488db27   Kirill A. Shutemov   memcg: clean up m...
4318
4319
4320
  	/* Swap primary and spare array */
  	thresholds->spare = thresholds->primary;
  	rcu_assign_pointer(thresholds->primary, new);
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4321

907860ed3   Kirill A. Shutemov   cgroups: make cft...
4322
  	/* To be sure that nobody uses thresholds */
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4323
  	synchronize_rcu();
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4324
  	mutex_unlock(&memcg->thresholds_lock);
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4325
  }
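  /*
   * Both the register and unregister paths above use the same double-buffer
   * scheme: writers serialize on thresholds_lock, build the next array in
   * the spare slot, publish it with rcu_assign_pointer(), save the old
   * primary as the new spare, and wait in synchronize_rcu() before dropping
   * the lock, so no lockless reader in __mem_cgroup_threshold() can still
   * be walking the old array by the time it is reused or freed.
   */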
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
4326

9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4327
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
  static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
  	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
  {
  	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
  	struct mem_cgroup_eventfd_list *event;
  	int type = MEMFILE_TYPE(cft->private);
  
  	BUG_ON(type != _OOM_TYPE);
  	event = kmalloc(sizeof(*event),	GFP_KERNEL);
  	if (!event)
  		return -ENOMEM;
1af8efe96   Michal Hocko   memcg: change mem...
4338
  	spin_lock(&memcg_oom_lock);
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4339
4340
4341
4342
4343
  
  	event->eventfd = eventfd;
  	list_add(&event->list, &memcg->oom_notify);
  
  	/* already in OOM ? */
79dfdaccd   Michal Hocko   memcg: make oom_l...
4344
  	if (atomic_read(&memcg->under_oom))
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4345
  		eventfd_signal(eventfd, 1);
1af8efe96   Michal Hocko   memcg: change mem...
4346
  	spin_unlock(&memcg_oom_lock);
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4347
4348
4349
  
  	return 0;
  }
907860ed3   Kirill A. Shutemov   cgroups: make cft...
4350
  static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4351
4352
4353
4354
4355
4356
4357
  	struct cftype *cft, struct eventfd_ctx *eventfd)
  {
  	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
  	struct mem_cgroup_eventfd_list *ev, *tmp;
  	int type = MEMFILE_TYPE(cft->private);
  
  	BUG_ON(type != _OOM_TYPE);
1af8efe96   Michal Hocko   memcg: change mem...
4358
  	spin_lock(&memcg_oom_lock);
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4359
4360
4361
4362
4363
4364
4365
  
  	list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
  		if (ev->eventfd == eventfd) {
  			list_del(&ev->list);
  			kfree(ev);
  		}
  	}
1af8efe96   Michal Hocko   memcg: change mem...
4366
  	spin_unlock(&memcg_oom_lock);
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4367
  }
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
4368
4369
4370
4371
4372
4373
  static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
  	struct cftype *cft,  struct cgroup_map_cb *cb)
  {
  	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
  
  	cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
79dfdaccd   Michal Hocko   memcg: make oom_l...
4374
  	if (atomic_read(&mem->under_oom))
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
4375
4376
4377
4378
4379
  		cb->fill(cb, "under_oom", 1);
  	else
  		cb->fill(cb, "under_oom", 0);
  	return 0;
  }
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
  static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
  	struct cftype *cft, u64 val)
  {
  	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
  	struct mem_cgroup *parent;
  
  	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
  	if (!cgrp->parent || !((val == 0) || (val == 1)))
  		return -EINVAL;
  
  	parent = mem_cgroup_from_cont(cgrp->parent);
  
  	cgroup_lock();
  	/* oom-kill-disable is a flag for subhierarchy. */
  	if ((parent->use_hierarchy) ||
  	    (mem->use_hierarchy && !list_empty(&cgrp->children))) {
  		cgroup_unlock();
  		return -EINVAL;
  	}
  	mem->oom_kill_disable = val;
4d845ebf4   KAMEZAWA Hiroyuki   memcg: fix wake u...
4400
4401
  	if (!val)
  		memcg_oom_recover(mem);
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
4402
4403
4404
  	cgroup_unlock();
  	return 0;
  }
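  /*
   * Userspace drives both handlers above through memory.oom_control:
   * writing "1" sets oom_kill_disable (rejected on the root cgroup), and
   * registering an eventfd against memory.oom_control via
   * cgroup.event_control delivers a wakeup whenever the group enters OOM.
   * A hedged sketch; the mount point and the group name are assumptions:
   */
  #include <sys/eventfd.h>
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  
  int main(void)
  {
  	int efd = eventfd(0, 0);
  	int ofd = open("/sys/fs/cgroup/memory/grp/memory.oom_control", O_RDWR);
  	int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control", O_WRONLY);
  	char buf[32];
  	uint64_t cnt;
  
  	if (efd < 0 || ofd < 0 || cfd < 0)
  		return 1;
  	write(ofd, "1", 1);		/* disable the OOM killer for this group */
  	snprintf(buf, sizeof(buf), "%d %d", efd, ofd);
  	write(cfd, buf, strlen(buf));	/* register for OOM notifications */
  	if (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))
  		printf("group entered OOM\n");
  	return 0;
  }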
406eb0c9b   Ying Han   memcg: add memory...
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
  #ifdef CONFIG_NUMA
  static const struct file_operations mem_control_numa_stat_file_operations = {
  	.read = seq_read,
  	.llseek = seq_lseek,
  	.release = single_release,
  };
  
  static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
  {
  	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
  
  	file->f_op = &mem_control_numa_stat_file_operations;
  	return single_open(file, mem_control_numa_stat_show, cont);
  }
  #endif /* CONFIG_NUMA */
8cdea7c05   Balbir Singh   Memory controller...
4420
4421
  static struct cftype mem_cgroup_files[] = {
  	{
0eea10301   Balbir Singh   Memory controller...
4422
  		.name = "usage_in_bytes",
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4423
  		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2c3daa722   Paul Menage   CGroup API files:...
4424
  		.read_u64 = mem_cgroup_read,
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4425
4426
  		.register_event = mem_cgroup_usage_register_event,
  		.unregister_event = mem_cgroup_usage_unregister_event,
8cdea7c05   Balbir Singh   Memory controller...
4427
4428
  	},
  	{
c84872e16   Pavel Emelyanov   memcgroup: add th...
4429
  		.name = "max_usage_in_bytes",
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4430
  		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
29f2a4dac   Pavel Emelyanov   memcgroup: implem...
4431
  		.trigger = mem_cgroup_reset,
c84872e16   Pavel Emelyanov   memcgroup: add th...
4432
4433
4434
  		.read_u64 = mem_cgroup_read,
  	},
  	{
0eea10301   Balbir Singh   Memory controller...
4435
  		.name = "limit_in_bytes",
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4436
  		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
856c13aa1   Paul Menage   cgroup files: con...
4437
  		.write_string = mem_cgroup_write,
2c3daa722   Paul Menage   CGroup API files:...
4438
  		.read_u64 = mem_cgroup_read,
8cdea7c05   Balbir Singh   Memory controller...
4439
4440
  	},
  	{
296c81d89   Balbir Singh   memory controller...
4441
4442
4443
4444
4445
4446
  		.name = "soft_limit_in_bytes",
  		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
  		.write_string = mem_cgroup_write,
  		.read_u64 = mem_cgroup_read,
  	},
  	{
8cdea7c05   Balbir Singh   Memory controller...
4447
  		.name = "failcnt",
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4448
  		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
29f2a4dac   Pavel Emelyanov   memcgroup: implem...
4449
  		.trigger = mem_cgroup_reset,
2c3daa722   Paul Menage   CGroup API files:...
4450
  		.read_u64 = mem_cgroup_read,
8cdea7c05   Balbir Singh   Memory controller...
4451
  	},
8697d3319   Balbir Singh   Memory controller...
4452
  	{
d2ceb9b7d   KAMEZAWA Hiroyuki   memory cgroup enh...
4453
  		.name = "stat",
c64745cf0   Paul Menage   CGroup API files:...
4454
  		.read_map = mem_control_stat_show,
d2ceb9b7d   KAMEZAWA Hiroyuki   memory cgroup enh...
4455
  	},
c1e862c1f   KAMEZAWA Hiroyuki   memcg: new force_...
4456
4457
4458
4459
  	{
  		.name = "force_empty",
  		.trigger = mem_cgroup_force_empty_write,
  	},
18f59ea7d   Balbir Singh   memcg: memory cgr...
4460
4461
4462
4463
4464
  	{
  		.name = "use_hierarchy",
  		.write_u64 = mem_cgroup_hierarchy_write,
  		.read_u64 = mem_cgroup_hierarchy_read,
  	},
a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4465
4466
4467
4468
4469
  	{
  		.name = "swappiness",
  		.read_u64 = mem_cgroup_swappiness_read,
  		.write_u64 = mem_cgroup_swappiness_write,
  	},
7dc74be03   Daisuke Nishimura   memcg: add interf...
4470
4471
4472
4473
4474
  	{
  		.name = "move_charge_at_immigrate",
  		.read_u64 = mem_cgroup_move_charge_read,
  		.write_u64 = mem_cgroup_move_charge_write,
  	},
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4475
4476
  	{
  		.name = "oom_control",
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
4477
4478
  		.read_map = mem_cgroup_oom_control_read,
  		.write_u64 = mem_cgroup_oom_control_write,
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4479
4480
4481
4482
  		.register_event = mem_cgroup_oom_register_event,
  		.unregister_event = mem_cgroup_oom_unregister_event,
  		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
  	},
406eb0c9b   Ying Han   memcg: add memory...
4483
4484
4485
4486
  #ifdef CONFIG_NUMA
  	{
  		.name = "numa_stat",
  		.open = mem_control_numa_stat_open,
895771271   KAMEZAWA Hiroyuki   mm: memory.numa_s...
4487
  		.mode = S_IRUGO,
406eb0c9b   Ying Han   memcg: add memory...
4488
4489
  	},
  #endif
8cdea7c05   Balbir Singh   Memory controller...
4490
  };
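  /*
   * Each entry above shows up as a memory.<name> file in every memory
   * cgroup directory. The .private field packs the counter type (_MEM here,
   * _MEMSWAP in the table below, _OOM_TYPE for oom_control) together with
   * the res_counter member, which is why a single mem_cgroup_read(),
   * mem_cgroup_write() or mem_cgroup_reset() handler can serve both the
   * plain and the memsw variants of the same file.
   */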
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4491
4492
4493
4494
4495
4496
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  static struct cftype memsw_cgroup_files[] = {
  	{
  		.name = "memsw.usage_in_bytes",
  		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
  		.read_u64 = mem_cgroup_read,
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4497
4498
  		.register_event = mem_cgroup_usage_register_event,
  		.unregister_event = mem_cgroup_usage_unregister_event,
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
  	},
  	{
  		.name = "memsw.max_usage_in_bytes",
  		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
  		.trigger = mem_cgroup_reset,
  		.read_u64 = mem_cgroup_read,
  	},
  	{
  		.name = "memsw.limit_in_bytes",
  		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
  		.write_string = mem_cgroup_write,
  		.read_u64 = mem_cgroup_read,
  	},
  	{
  		.name = "memsw.failcnt",
  		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
  		.trigger = mem_cgroup_reset,
  		.read_u64 = mem_cgroup_read,
  	},
  };
  
  static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
  {
  	if (!do_swap_account)
  		return 0;
  	return cgroup_add_files(cont, ss, memsw_cgroup_files,
  				ARRAY_SIZE(memsw_cgroup_files));
  };
  #else
  static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
  {
  	return 0;
  }
  #endif
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
4533
4534
4535
  static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
  {
  	struct mem_cgroup_per_node *pn;
1ecaab2bd   KAMEZAWA Hiroyuki   per-zone and recl...
4536
  	struct mem_cgroup_per_zone *mz;
b69408e88   Christoph Lameter   vmscan: Use an in...
4537
  	enum lru_list l;
41e3355de   KAMEZAWA Hiroyuki   memcg: fix node_s...
4538
  	int zone, tmp = node;
1ecaab2bd   KAMEZAWA Hiroyuki   per-zone and recl...
4539
4540
4541
4542
4543
4544
4545
4546
  	/*
  	 * This routine is called against possible nodes.
  	 * But it's a BUG to call kmalloc() against an offline node.
  	 *
  	 * TODO: this routine can waste a lot of memory for nodes which will
  	 *       never be onlined. It's better to use a memory hotplug callback
  	 *       function.
  	 */
41e3355de   KAMEZAWA Hiroyuki   memcg: fix node_s...
4547
4548
  	if (!node_state(node, N_NORMAL_MEMORY))
  		tmp = -1;
17295c88a   Jesper Juhl   memcg: use [kv]za...
4549
  	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
4550
4551
  	if (!pn)
  		return 1;
1ecaab2bd   KAMEZAWA Hiroyuki   per-zone and recl...
4552

6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
4553
  	mem->info.nodeinfo[node] = pn;
1ecaab2bd   KAMEZAWA Hiroyuki   per-zone and recl...
4554
4555
  	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  		mz = &pn->zoneinfo[zone];
b69408e88   Christoph Lameter   vmscan: Use an in...
4556
4557
  		for_each_lru(l)
  			INIT_LIST_HEAD(&mz->lists[l]);
f64c3f549   Balbir Singh   memory controller...
4558
  		mz->usage_in_excess = 0;
4e4169535   Balbir Singh   memory controller...
4559
4560
  		mz->on_tree = false;
  		mz->mem = mem;
1ecaab2bd   KAMEZAWA Hiroyuki   per-zone and recl...
4561
  	}
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
4562
4563
  	return 0;
  }
1ecaab2bd   KAMEZAWA Hiroyuki   per-zone and recl...
4564
4565
4566
4567
  static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
  {
  	kfree(mem->info.nodeinfo[node]);
  }
333279487   KAMEZAWA Hiroyuki   memcgroup: use vm...
4568
4569
4570
  static struct mem_cgroup *mem_cgroup_alloc(void)
  {
  	struct mem_cgroup *mem;
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
4571
  	int size = sizeof(struct mem_cgroup);
333279487   KAMEZAWA Hiroyuki   memcgroup: use vm...
4572

c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
4573
  	/* Can be very big if MAX_NUMNODES is very big */
c8dad2bb6   Jan Blunck   memcg: reduce siz...
4574
  	if (size < PAGE_SIZE)
17295c88a   Jesper Juhl   memcg: use [kv]za...
4575
  		mem = kzalloc(size, GFP_KERNEL);
333279487   KAMEZAWA Hiroyuki   memcgroup: use vm...
4576
  	else
17295c88a   Jesper Juhl   memcg: use [kv]za...
4577
  		mem = vzalloc(size);
333279487   KAMEZAWA Hiroyuki   memcgroup: use vm...
4578

e7bbcdf37   Dan Carpenter   memcontrol: fix p...
4579
4580
  	if (!mem)
  		return NULL;
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
4581
  	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
d2e61b8dc   Dan Carpenter   memcg: null deref...
4582
4583
  	if (!mem->stat)
  		goto out_free;
711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
4584
  	spin_lock_init(&mem->pcp_counter_lock);
333279487   KAMEZAWA Hiroyuki   memcgroup: use vm...
4585
  	return mem;
d2e61b8dc   Dan Carpenter   memcg: null deref...
4586
4587
4588
4589
4590
4591
4592
  
  out_free:
  	if (size < PAGE_SIZE)
  		kfree(mem);
  	else
  		vfree(mem);
  	return NULL;
333279487   KAMEZAWA Hiroyuki   memcgroup: use vm...
4593
  }
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4594
4595
4596
4597
4598
4599
4600
4601
  /*
   * When destroying a mem_cgroup, references from swap_cgroup can remain
   * (scanning them all at force_empty is too costly...).
   *
   * Instead of clearing all references at force_empty, we remember
   * the number of references from swap_cgroup and free the mem_cgroup when
   * it goes down to 0.
   *
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4602
4603
   * Removal of cgroup itself succeeds regardless of refs from swap.
   */
a7ba0eef3   KAMEZAWA Hiroyuki   memcg: fix double...
4604
  static void __mem_cgroup_free(struct mem_cgroup *mem)
333279487   KAMEZAWA Hiroyuki   memcgroup: use vm...
4605
  {
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
4606
  	int node;
f64c3f549   Balbir Singh   memory controller...
4607
  	mem_cgroup_remove_from_trees(mem);
04046e1a0   KAMEZAWA Hiroyuki   memcg: use CSS ID
4608
  	free_css_id(&mem_cgroup_subsys, &mem->css);
08e552c69   KAMEZAWA Hiroyuki   memcg: synchroniz...
4609
4610
  	for_each_node_state(node, N_POSSIBLE)
  		free_mem_cgroup_per_zone_info(mem, node);
c62b1a3b3   KAMEZAWA Hiroyuki   memcg: use generi...
4611
4612
  	free_percpu(mem->stat);
  	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
333279487   KAMEZAWA Hiroyuki   memcgroup: use vm...
4613
4614
4615
4616
  		kfree(mem);
  	else
  		vfree(mem);
  }
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4617
4618
4619
4620
  static void mem_cgroup_get(struct mem_cgroup *mem)
  {
  	atomic_inc(&mem->refcnt);
  }
483c30b51   Daisuke Nishimura   memcg: improve pe...
4621
  static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4622
  {
483c30b51   Daisuke Nishimura   memcg: improve pe...
4623
  	if (atomic_sub_and_test(count, &mem->refcnt)) {
7bcc1bb12   Daisuke Nishimura   memcg: get/put pa...
4624
  		struct mem_cgroup *parent = parent_mem_cgroup(mem);
a7ba0eef3   KAMEZAWA Hiroyuki   memcg: fix double...
4625
  		__mem_cgroup_free(mem);
7bcc1bb12   Daisuke Nishimura   memcg: get/put pa...
4626
4627
4628
  		if (parent)
  			mem_cgroup_put(parent);
  	}
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4629
  }
483c30b51   Daisuke Nishimura   memcg: improve pe...
4630
4631
4632
4633
  static void mem_cgroup_put(struct mem_cgroup *mem)
  {
  	__mem_cgroup_put(mem, 1);
  }
7bcc1bb12   Daisuke Nishimura   memcg: get/put pa...
4634
4635
4636
4637
4638
4639
4640
4641
4642
  /*
   * Returns the parent mem_cgroup in the memcg hierarchy when use_hierarchy is enabled.
   */
  static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
  {
  	if (!mem->res.parent)
  		return NULL;
  	return mem_cgroup_from_res_counter(mem->res.parent, res);
  }
333279487   KAMEZAWA Hiroyuki   memcgroup: use vm...
4643

c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4644
4645
4646
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  static void __init enable_swap_cgroup(void)
  {
f8d665422   Hirokazu Takahashi   memcg: add mem_cg...
4647
  	if (!mem_cgroup_disabled() && really_do_swap_account)
c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4648
4649
4650
4651
4652
4653
4654
  		do_swap_account = 1;
  }
  #else
  static void __init enable_swap_cgroup(void)
  {
  }
  #endif
f64c3f549   Balbir Singh   memory controller...
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
  static int mem_cgroup_soft_limit_tree_init(void)
  {
  	struct mem_cgroup_tree_per_node *rtpn;
  	struct mem_cgroup_tree_per_zone *rtpz;
  	int tmp, node, zone;
  
  	for_each_node_state(node, N_POSSIBLE) {
  		tmp = node;
  		if (!node_state(node, N_NORMAL_MEMORY))
  			tmp = -1;
  		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
  		if (!rtpn)
  			return 1;
  
  		soft_limit_tree.rb_tree_per_node[node] = rtpn;
  
  		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  			rtpz = &rtpn->rb_tree_per_zone[zone];
  			rtpz->rb_root = RB_ROOT;
  			spin_lock_init(&rtpz->lock);
  		}
  	}
  	return 0;
  }
0eb253e22   Li Zefan   memcg: fix sectio...
4679
  static struct cgroup_subsys_state * __ref
8cdea7c05   Balbir Singh   Memory controller...
4680
4681
  mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
  {
28dbc4b6a   Balbir Singh   memcg: memory cgr...
4682
  	struct mem_cgroup *mem, *parent;
04046e1a0   KAMEZAWA Hiroyuki   memcg: use CSS ID
4683
  	long error = -ENOMEM;
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
4684
  	int node;
8cdea7c05   Balbir Singh   Memory controller...
4685

c8dad2bb6   Jan Blunck   memcg: reduce siz...
4686
4687
  	mem = mem_cgroup_alloc();
  	if (!mem)
04046e1a0   KAMEZAWA Hiroyuki   memcg: use CSS ID
4688
  		return ERR_PTR(error);
78fb74669   Pavel Emelianov   Memory controller...
4689

6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
4690
4691
4692
  	for_each_node_state(node, N_POSSIBLE)
  		if (alloc_mem_cgroup_per_zone_info(mem, node))
  			goto free_out;
f64c3f549   Balbir Singh   memory controller...
4693

c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4694
  	/* root ? */
28dbc4b6a   Balbir Singh   memcg: memory cgr...
4695
  	if (cont->parent == NULL) {
cdec2e426   KAMEZAWA Hiroyuki   memcg: coalesce c...
4696
  		int cpu;
c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4697
  		enable_swap_cgroup();
28dbc4b6a   Balbir Singh   memcg: memory cgr...
4698
  		parent = NULL;
4b3bde4c9   Balbir Singh   memcg: remove the...
4699
  		root_mem_cgroup = mem;
f64c3f549   Balbir Singh   memory controller...
4700
4701
  		if (mem_cgroup_soft_limit_tree_init())
  			goto free_out;
cdec2e426   KAMEZAWA Hiroyuki   memcg: coalesce c...
4702
4703
4704
4705
4706
  		for_each_possible_cpu(cpu) {
  			struct memcg_stock_pcp *stock =
  						&per_cpu(memcg_stock, cpu);
  			INIT_WORK(&stock->work, drain_local_stock);
  		}
711d3d2c9   KAMEZAWA Hiroyuki   memcg: cpu hotplu...
4707
  		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
18f59ea7d   Balbir Singh   memcg: memory cgr...
4708
  	} else {
28dbc4b6a   Balbir Singh   memcg: memory cgr...
4709
  		parent = mem_cgroup_from_cont(cont->parent);
18f59ea7d   Balbir Singh   memcg: memory cgr...
4710
  		mem->use_hierarchy = parent->use_hierarchy;
3c11ecf44   KAMEZAWA Hiroyuki   memcg: oom kill d...
4711
  		mem->oom_kill_disable = parent->oom_kill_disable;
18f59ea7d   Balbir Singh   memcg: memory cgr...
4712
  	}
28dbc4b6a   Balbir Singh   memcg: memory cgr...
4713

18f59ea7d   Balbir Singh   memcg: memory cgr...
4714
4715
4716
  	if (parent && parent->use_hierarchy) {
  		res_counter_init(&mem->res, &parent->res);
  		res_counter_init(&mem->memsw, &parent->memsw);
7bcc1bb12   Daisuke Nishimura   memcg: get/put pa...
4717
4718
4719
4720
4721
4722
4723
  		/*
  		 * We increment the refcnt of the parent to ensure that we can
  		 * safely access it on res_counter_charge/uncharge.
  		 * This refcnt will be decremented when freeing this
  		 * mem_cgroup (see mem_cgroup_put).
  		 */
  		mem_cgroup_get(parent);
18f59ea7d   Balbir Singh   memcg: memory cgr...
4724
4725
4726
4727
  	} else {
  		res_counter_init(&mem->res, NULL);
  		res_counter_init(&mem->memsw, NULL);
  	}
04046e1a0   KAMEZAWA Hiroyuki   memcg: use CSS ID
4728
  	mem->last_scanned_child = 0;
889976dbc   Ying Han   memcg: reclaim me...
4729
  	mem->last_scanned_node = MAX_NUMNODES;
9490ff275   KAMEZAWA Hiroyuki   memcg: oom notifier
4730
  	INIT_LIST_HEAD(&mem->oom_notify);
6d61ef409   Balbir Singh   memcg: memory cgr...
4731

a7885eb8a   KOSAKI Motohiro   memcg: swappiness
4732
  	if (parent)
1f4c025b5   KAMEZAWA Hiroyuki   memcg: export mem...
4733
  		mem->swappiness = mem_cgroup_swappiness(parent);
a7ba0eef3   KAMEZAWA Hiroyuki   memcg: fix double...
4734
  	atomic_set(&mem->refcnt, 1);
7dc74be03   Daisuke Nishimura   memcg: add interf...
4735
  	mem->move_charge_at_immigrate = 0;
2e72b6347   Kirill A. Shutemov   memcg: implement ...
4736
  	mutex_init(&mem->thresholds_lock);
8cdea7c05   Balbir Singh   Memory controller...
4737
  	return &mem->css;
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
4738
  free_out:
a7ba0eef3   KAMEZAWA Hiroyuki   memcg: fix double...
4739
  	__mem_cgroup_free(mem);
4b3bde4c9   Balbir Singh   memcg: remove the...
4740
  	root_mem_cgroup = NULL;
04046e1a0   KAMEZAWA Hiroyuki   memcg: use CSS ID
4741
  	return ERR_PTR(error);
8cdea7c05   Balbir Singh   Memory controller...
4742
  }
ec64f5154   KAMEZAWA Hiroyuki   cgroup: fix frequ...
4743
  static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
df878fb04   KAMEZAWA Hiroyuki   memory cgroup enh...
4744
4745
4746
  					struct cgroup *cont)
  {
  	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
ec64f5154   KAMEZAWA Hiroyuki   cgroup: fix frequ...
4747
4748
  
  	return mem_cgroup_force_empty(mem, false);
df878fb04   KAMEZAWA Hiroyuki   memory cgroup enh...
4749
  }
8cdea7c05   Balbir Singh   Memory controller...
4750
4751
4752
  static void mem_cgroup_destroy(struct cgroup_subsys *ss,
  				struct cgroup *cont)
  {
c268e9946   Daisuke Nishimura   memcg: fix hierar...
4753
  	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
c268e9946   Daisuke Nishimura   memcg: fix hierar...
4754

c268e9946   Daisuke Nishimura   memcg: fix hierar...
4755
  	mem_cgroup_put(mem);
8cdea7c05   Balbir Singh   Memory controller...
4756
4757
4758
4759
4760
  }
  
  static int mem_cgroup_populate(struct cgroup_subsys *ss,
  				struct cgroup *cont)
  {
8c7c6e34a   KAMEZAWA Hiroyuki   memcg: mem+swap c...
4761
4762
4763
4764
4765
4766
4767
4768
  	int ret;
  
  	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
  				ARRAY_SIZE(mem_cgroup_files));
  
  	if (!ret)
  		ret = register_memsw_files(cont, ss);
  	return ret;
8cdea7c05   Balbir Singh   Memory controller...
4769
  }
024914477   Daisuke Nishimura   memcg: move charg...
4770
  #ifdef CONFIG_MMU
7dc74be03   Daisuke Nishimura   memcg: add interf...
4771
  /* Handlers for move charge at task migration. */
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
4772
4773
  #define PRECHARGE_COUNT_AT_ONCE	256
  static int mem_cgroup_do_precharge(unsigned long count)
7dc74be03   Daisuke Nishimura   memcg: add interf...
4774
  {
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
4775
4776
  	int ret = 0;
  	int batch_count = PRECHARGE_COUNT_AT_ONCE;
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4777
  	struct mem_cgroup *mem = mc.to;
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798
4799
  	if (mem_cgroup_is_root(mem)) {
  		mc.precharge += count;
  		/* we don't need css_get for root */
  		return ret;
  	}
  	/* try to charge at once */
  	if (count > 1) {
  		struct res_counter *dummy;
  		/*
  		 * "mem" cannot be under rmdir() because we've already checked
  		 * by cgroup_lock_live_cgroup() that it is not removed and we
  		 * are still under the same cgroup_mutex. So we can postpone
  		 * css_get().
  		 */
  		if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
  			goto one_by_one;
  		if (do_swap_account && res_counter_charge(&mem->memsw,
  						PAGE_SIZE * count, &dummy)) {
  			res_counter_uncharge(&mem->res, PAGE_SIZE * count);
  			goto one_by_one;
  		}
  		mc.precharge += count;
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
  		return ret;
  	}
  one_by_one:
  	/* fall back to one by one charge */
  	while (count--) {
  		if (signal_pending(current)) {
  			ret = -EINTR;
  			break;
  		}
  		if (!batch_count--) {
  			batch_count = PRECHARGE_COUNT_AT_ONCE;
  			cond_resched();
  		}
7ec99d621   Johannes Weiner   memcg: unify char...
4813
  		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
4814
4815
4816
4817
4818
  		if (ret || !mem)
  			/* mem_cgroup_clear_mc() will do uncharge later */
  			return -ENOMEM;
  		mc.precharge++;
  	}
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4819
4820
4821
4822
4823
4824
4825
4826
  	return ret;
  }
  
  /**
   * is_target_pte_for_mc - check whether a pte is a valid target for move charge
   * @vma: the vma the pte to be checked belongs to
   * @addr: the address corresponding to the pte to be checked
   * @ptent: the pte to be checked
024914477   Daisuke Nishimura   memcg: move charg...
4827
   * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4828
4829
4830
4831
4832
4833
   *
   * Returns
   *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
   *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
   *     move charge. If @target is not NULL, the page is stored in target->page
   *     with an extra refcount taken (callers should handle it).
024914477   Daisuke Nishimura   memcg: move charg...
4834
4835
4836
   *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
   *     target for charge migration. If @target is not NULL, the entry is stored
   *     in target->ent.
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4837
4838
4839
   *
   * Called with pte lock held.
   */
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4840
4841
  union mc_target {
  	struct page	*page;
024914477   Daisuke Nishimura   memcg: move charg...
4842
  	swp_entry_t	ent;
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4843
  };
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4844
4845
4846
  enum mc_target_type {
  	MC_TARGET_NONE,	/* not used */
  	MC_TARGET_PAGE,
024914477   Daisuke Nishimura   memcg: move charg...
4847
  	MC_TARGET_SWAP,
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4848
  };
90254a658   Daisuke Nishimura   memcg: clean up m...
4849
4850
  static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
  						unsigned long addr, pte_t ptent)
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4851
  {
90254a658   Daisuke Nishimura   memcg: clean up m...
4852
  	struct page *page = vm_normal_page(vma, addr, ptent);
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4853

90254a658   Daisuke Nishimura   memcg: clean up m...
4854
4855
4856
4857
4858
4859
  	if (!page || !page_mapped(page))
  		return NULL;
  	if (PageAnon(page)) {
  		/* we don't move shared anon */
  		if (!move_anon() || page_mapcount(page) > 2)
  			return NULL;
87946a722   Daisuke Nishimura   memcg: move charg...
4860
4861
  	} else if (!move_file())
  		/* we ignore mapcount for file pages */
90254a658   Daisuke Nishimura   memcg: clean up m...
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
  		return NULL;
  	if (!get_page_unless_zero(page))
  		return NULL;
  
  	return page;
  }
  
  static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
  			unsigned long addr, pte_t ptent, swp_entry_t *entry)
  {
  	int usage_count;
  	struct page *page = NULL;
  	swp_entry_t ent = pte_to_swp_entry(ptent);
  
  	if (!move_anon() || non_swap_entry(ent))
  		return NULL;
  	usage_count = mem_cgroup_count_swap_user(ent, &page);
  	if (usage_count > 1) { /* we don't move shared anon */
024914477   Daisuke Nishimura   memcg: move charg...
4880
4881
  		if (page)
  			put_page(page);
90254a658   Daisuke Nishimura   memcg: clean up m...
4882
  		return NULL;
024914477   Daisuke Nishimura   memcg: move charg...
4883
  	}
90254a658   Daisuke Nishimura   memcg: clean up m...
4884
4885
4886
4887
4888
  	if (do_swap_account)
  		entry->val = ent.val;
  
  	return page;
  }
87946a722   Daisuke Nishimura   memcg: move charg...
4889
4890
4891
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
  static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
  			unsigned long addr, pte_t ptent, swp_entry_t *entry)
  {
  	struct page *page = NULL;
  	struct inode *inode;
  	struct address_space *mapping;
  	pgoff_t pgoff;
  
  	if (!vma->vm_file) /* anonymous vma */
  		return NULL;
  	if (!move_file())
  		return NULL;
  
  	inode = vma->vm_file->f_path.dentry->d_inode;
  	mapping = vma->vm_file->f_mapping;
  	if (pte_none(ptent))
  		pgoff = linear_page_index(vma, addr);
  	else /* pte_file(ptent) is true */
  		pgoff = pte_to_pgoff(ptent);
  
  	/* the page is moved even if it is not part of this task's RSS (not yet faulted in). */
aa3b18955   Hugh Dickins   tmpfs: convert me...
4910
4911
4912
4913
4914
4915
  	page = find_get_page(mapping, pgoff);
  
  #ifdef CONFIG_SWAP
  	/* shmem/tmpfs may report page out on swap: account for that too. */
  	if (radix_tree_exceptional_entry(page)) {
  		swp_entry_t swap = radix_to_swp_entry(page);
87946a722   Daisuke Nishimura   memcg: move charg...
4916
  		if (do_swap_account)
aa3b18955   Hugh Dickins   tmpfs: convert me...
4917
4918
  			*entry = swap;
  		page = find_get_page(&swapper_space, swap.val);
87946a722   Daisuke Nishimura   memcg: move charg...
4919
  	}
aa3b18955   Hugh Dickins   tmpfs: convert me...
4920
  #endif
87946a722   Daisuke Nishimura   memcg: move charg...
4921
4922
  	return page;
  }
90254a658   Daisuke Nishimura   memcg: clean up m...
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934
  static int is_target_pte_for_mc(struct vm_area_struct *vma,
  		unsigned long addr, pte_t ptent, union mc_target *target)
  {
  	struct page *page = NULL;
  	struct page_cgroup *pc;
  	int ret = 0;
  	swp_entry_t ent = { .val = 0 };
  
  	if (pte_present(ptent))
  		page = mc_handle_present_pte(vma, addr, ptent);
  	else if (is_swap_pte(ptent))
  		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
87946a722   Daisuke Nishimura   memcg: move charg...
4935
4936
  	else if (pte_none(ptent) || pte_file(ptent))
  		page = mc_handle_file_pte(vma, addr, ptent, &ent);
90254a658   Daisuke Nishimura   memcg: clean up m...
4937
4938
4939
  
  	if (!page && !ent.val)
  		return 0;
024914477   Daisuke Nishimura   memcg: move charg...
4940
4941
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952
4953
4954
  	if (page) {
  		pc = lookup_page_cgroup(page);
  		/*
  		 * Do only a loose check without the page_cgroup lock;
  		 * mem_cgroup_move_account() checks whether the pc is valid under
  		 * the lock.
  		 */
  		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
  			ret = MC_TARGET_PAGE;
  			if (target)
  				target->page = page;
  		}
  		if (!ret || !target)
  			put_page(page);
  	}
90254a658   Daisuke Nishimura   memcg: clean up m...
4955
4956
  	/* There is a swap entry, and the page doesn't exist or isn't charged */
  	if (ent.val && !ret &&
7f0f15464   KAMEZAWA Hiroyuki   memcg: fix css_id...
4957
4958
4959
4960
  			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
  		ret = MC_TARGET_SWAP;
  		if (target)
  			target->ent = ent;
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4961
  	}
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
  	return ret;
  }
  
  static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
  					unsigned long addr, unsigned long end,
  					struct mm_walk *walk)
  {
  	struct vm_area_struct *vma = walk->private;
  	pte_t *pte;
  	spinlock_t *ptl;
033193275   Dave Hansen   pagewalk: only sp...
4972
  	split_huge_page_pmd(walk->mm, pmd);
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4973
4974
4975
4976
4977
4978
  	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  	for (; addr != end; pte++, addr += PAGE_SIZE)
  		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
  			mc.precharge++;	/* increment precharge temporarily */
  	pte_unmap_unlock(pte - 1, ptl);
  	cond_resched();
7dc74be03   Daisuke Nishimura   memcg: add interf...
4979
4980
  	return 0;
  }
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4981
4982
4983
4984
  static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
  {
  	unsigned long precharge;
  	struct vm_area_struct *vma;
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
4985
  	down_read(&mm->mmap_sem);
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4986
4987
4988
4989
4990
4991
4992
4993
  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
  		struct mm_walk mem_cgroup_count_precharge_walk = {
  			.pmd_entry = mem_cgroup_count_precharge_pte_range,
  			.mm = mm,
  			.private = vma,
  		};
  		if (is_vm_hugetlb_page(vma))
  			continue;
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4994
4995
4996
  		walk_page_range(vma->vm_start, vma->vm_end,
  					&mem_cgroup_count_precharge_walk);
  	}
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
4997
  	up_read(&mm->mmap_sem);
4ffef5fef   Daisuke Nishimura   memcg: move charg...
4998
4999
5000
5001
5002
5003
  
  	precharge = mc.precharge;
  	mc.precharge = 0;
  
  	return precharge;
  }
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5004
5005
  static int mem_cgroup_precharge_mc(struct mm_struct *mm)
  {
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
5006
5007
5008
5009
5010
  	unsigned long precharge = mem_cgroup_count_precharge(mm);
  
  	VM_BUG_ON(mc.moving_task);
  	mc.moving_task = current;
  	return mem_cgroup_do_precharge(precharge);
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5011
  }
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
5012
5013
  /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
  static void __mem_cgroup_clear_mc(void)
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5014
  {
2bd9bb206   KAMEZAWA Hiroyuki   memcg: clean up w...
5015
5016
  	struct mem_cgroup *from = mc.from;
  	struct mem_cgroup *to = mc.to;
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5017
  	/* we must uncharge all the leftover precharges from mc.to */
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
5028
  	if (mc.precharge) {
  		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
  		mc.precharge = 0;
  	}
  	/*
  	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
  	 * we must uncharge here.
  	 */
  	if (mc.moved_charge) {
  		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
  		mc.moved_charge = 0;
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5029
  	}
483c30b51   Daisuke Nishimura   memcg: improve pe...
5030
5031
  	/* we must fixup refcnts and charges */
  	if (mc.moved_swap) {
483c30b51   Daisuke Nishimura   memcg: improve pe...
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
  		/* uncharge swap account from the old cgroup */
  		if (!mem_cgroup_is_root(mc.from))
  			res_counter_uncharge(&mc.from->memsw,
  						PAGE_SIZE * mc.moved_swap);
  		__mem_cgroup_put(mc.from, mc.moved_swap);
  
  		if (!mem_cgroup_is_root(mc.to)) {
  			/*
  			 * we charged both to->res and to->memsw, so we should
  			 * uncharge to->res.
  			 */
  			res_counter_uncharge(&mc.to->res,
  						PAGE_SIZE * mc.moved_swap);
483c30b51   Daisuke Nishimura   memcg: improve pe...
5045
5046
  		}
  		/* we've already done mem_cgroup_get(mc.to) */
483c30b51   Daisuke Nishimura   memcg: improve pe...
5047
5048
  		mc.moved_swap = 0;
  	}
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062
5063
  	memcg_oom_recover(from);
  	memcg_oom_recover(to);
  	wake_up_all(&mc.waitq);
  }
  
  static void mem_cgroup_clear_mc(void)
  {
  	struct mem_cgroup *from = mc.from;
  
  	/*
  	 * we must clear moving_task before waking up waiters at the end of
  	 * task migration.
  	 */
  	mc.moving_task = NULL;
  	__mem_cgroup_clear_mc();
2bd9bb206   KAMEZAWA Hiroyuki   memcg: clean up w...
5064
  	spin_lock(&mc.lock);
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5065
5066
  	mc.from = NULL;
  	mc.to = NULL;
2bd9bb206   KAMEZAWA Hiroyuki   memcg: clean up w...
5067
  	spin_unlock(&mc.lock);
32047e2a8   KAMEZAWA Hiroyuki   memcg: avoid lock...
5068
  	mem_cgroup_end_move(from);
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5069
  }
7dc74be03   Daisuke Nishimura   memcg: add interf...
5070
5071
  static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
  				struct cgroup *cgroup,
f780bdb7c   Ben Blum   cgroups: add per-...
5072
  				struct task_struct *p)
7dc74be03   Daisuke Nishimura   memcg: add interf...
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
  {
  	int ret = 0;
  	struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
  
  	if (mem->move_charge_at_immigrate) {
  		struct mm_struct *mm;
  		struct mem_cgroup *from = mem_cgroup_from_task(p);
  
  		VM_BUG_ON(from == mem);
  
  		mm = get_task_mm(p);
  		if (!mm)
  			return 0;
7dc74be03   Daisuke Nishimura   memcg: add interf...
5086
  		/* We move charges only when we move the owner of the mm */
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5087
5088
5089
5090
  		if (mm->owner == p) {
  			VM_BUG_ON(mc.from);
  			VM_BUG_ON(mc.to);
  			VM_BUG_ON(mc.precharge);
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
5091
  			VM_BUG_ON(mc.moved_charge);
483c30b51   Daisuke Nishimura   memcg: improve pe...
5092
  			VM_BUG_ON(mc.moved_swap);
32047e2a8   KAMEZAWA Hiroyuki   memcg: avoid lock...
5093
  			mem_cgroup_start_move(from);
2bd9bb206   KAMEZAWA Hiroyuki   memcg: clean up w...
5094
  			spin_lock(&mc.lock);
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5095
5096
  			mc.from = from;
  			mc.to = mem;
2bd9bb206   KAMEZAWA Hiroyuki   memcg: clean up w...
5097
  			spin_unlock(&mc.lock);
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
5098
  			/* We set mc.moving_task later */
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5099
5100
5101
5102
  
  			ret = mem_cgroup_precharge_mc(mm);
  			if (ret)
  				mem_cgroup_clear_mc();
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
5103
5104
  		}
  		mmput(mm);
7dc74be03   Daisuke Nishimura   memcg: add interf...
5105
5106
5107
5108
5109
5110
  	}
  	return ret;
  }
  
  static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
  				struct cgroup *cgroup,
f780bdb7c   Ben Blum   cgroups: add per-...
5111
  				struct task_struct *p)
7dc74be03   Daisuke Nishimura   memcg: add interf...
5112
  {
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5113
  	mem_cgroup_clear_mc();
7dc74be03   Daisuke Nishimura   memcg: add interf...
5114
  }
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5115
5116
5117
  static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
  				unsigned long addr, unsigned long end,
  				struct mm_walk *walk)
7dc74be03   Daisuke Nishimura   memcg: add interf...
5118
  {
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5119
5120
5121
5122
  	int ret = 0;
  	struct vm_area_struct *vma = walk->private;
  	pte_t *pte;
  	spinlock_t *ptl;
033193275   Dave Hansen   pagewalk: only sp...
5123
  	split_huge_page_pmd(walk->mm, pmd);
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5124
5125
5126
5127
5128
5129
5130
5131
  retry:
  	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  	for (; addr != end; addr += PAGE_SIZE) {
  		pte_t ptent = *(pte++);
  		union mc_target target;
  		int type;
  		struct page *page;
  		struct page_cgroup *pc;
024914477   Daisuke Nishimura   memcg: move charg...
5132
  		swp_entry_t ent;
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
  
  		if (!mc.precharge)
  			break;
  
  		type = is_target_pte_for_mc(vma, addr, ptent, &target);
  		switch (type) {
  		case MC_TARGET_PAGE:
  			page = target.page;
  			if (isolate_lru_page(page))
  				goto put;
  			pc = lookup_page_cgroup(page);
7ec99d621   Johannes Weiner   memcg: unify char...
5144
5145
  			if (!mem_cgroup_move_account(page, 1, pc,
  						     mc.from, mc.to, false)) {
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5146
  				mc.precharge--;
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
5147
5148
  				/* we uncharge from mc.from later. */
  				mc.moved_charge++;
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5149
5150
5151
5152
5153
  			}
  			putback_lru_page(page);
  put:			/* is_target_pte_for_mc() gets the page */
  			put_page(page);
  			break;
024914477   Daisuke Nishimura   memcg: move charg...
5154
5155
  		case MC_TARGET_SWAP:
  			ent = target.ent;
483c30b51   Daisuke Nishimura   memcg: improve pe...
5156
5157
  			if (!mem_cgroup_move_swap_account(ent,
  						mc.from, mc.to, false)) {
024914477   Daisuke Nishimura   memcg: move charg...
5158
  				mc.precharge--;
483c30b51   Daisuke Nishimura   memcg: improve pe...
5159
5160
5161
  				/* we fixup refcnts and charges later. */
  				mc.moved_swap++;
  			}
024914477   Daisuke Nishimura   memcg: move charg...
5162
  			break;
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
5173
5174
5175
5176
  		default:
  			break;
  		}
  	}
  	pte_unmap_unlock(pte - 1, ptl);
  	cond_resched();
  
  	if (addr != end) {
  		/*
  		 * We have consumed all precharges we got in can_attach().
  		 * We try to charge one by one, but don't do any additional
  		 * charges to mc.to if we have already failed to charge once
  		 * in the attach() phase.
  		 */
854ffa8d1   Daisuke Nishimura   memcg: improve pe...
5177
  		ret = mem_cgroup_do_precharge(1);
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
  		if (!ret)
  			goto retry;
  	}
  
  	return ret;
  }
  
  static void mem_cgroup_move_charge(struct mm_struct *mm)
  {
  	struct vm_area_struct *vma;
  
  	lru_add_drain_all();
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
5190
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200
5201
5202
  retry:
  	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
  		/*
  		 * Someone who is holding the mmap_sem might be waiting on the
  		 * waitq. So we cancel all extra charges, wake up all waiters,
  		 * and retry. Because we cancel precharges, we might not be able
  		 * to move enough charges, but moving charge is a best-effort
  		 * feature anyway, so it wouldn't be a big problem.
  		 */
  		__mem_cgroup_clear_mc();
  		cond_resched();
  		goto retry;
  	}
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5203
5204
5205
5206
5207
5208
5209
5210
5211
  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
  		int ret;
  		struct mm_walk mem_cgroup_move_charge_walk = {
  			.pmd_entry = mem_cgroup_move_charge_pte_range,
  			.mm = mm,
  			.private = vma,
  		};
  		if (is_vm_hugetlb_page(vma))
  			continue;
4ffef5fef   Daisuke Nishimura   memcg: move charg...
5212
5213
5214
5215
5216
5217
5218
5219
5220
  		ret = walk_page_range(vma->vm_start, vma->vm_end,
  						&mem_cgroup_move_charge_walk);
  		if (ret)
  			/*
  			 * This means we have consumed all precharges and failed
  			 * to do an additional charge. Just abandon here.
  			 */
  			break;
  	}
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
5221
  	up_read(&mm->mmap_sem);
7dc74be03   Daisuke Nishimura   memcg: add interf...
5222
  }
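  /*
   * Putting the move-charge pieces together: can_attach() precharges the
   * number of movable ptes it counted, the walk above transfers each page's
   * charge from mc.from to mc.to while consuming those precharges (falling
   * back to one-by-one charging when they run out), and any leftovers are
   * given back in mem_cgroup_clear_mc() once the migration finishes or is
   * cancelled.
   */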
67e465a77   Balbir Singh   Memory controller...
5223
5224
5225
  static void mem_cgroup_move_task(struct cgroup_subsys *ss,
  				struct cgroup *cont,
  				struct cgroup *old_cont,
f780bdb7c   Ben Blum   cgroups: add per-...
5226
  				struct task_struct *p)
67e465a77   Balbir Singh   Memory controller...
5227
  {
a433658c3   KOSAKI Motohiro   vmscan,memcg: mem...
5228
  	struct mm_struct *mm = get_task_mm(p);
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
5229

dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
5230
  	if (mm) {
a433658c3   KOSAKI Motohiro   vmscan,memcg: mem...
5231
5232
5233
  		if (mc.to)
  			mem_cgroup_move_charge(mm);
  		put_swap_token(mm);
dfe076b09   Daisuke Nishimura   memcg: fix deadlo...
5234
5235
  		mmput(mm);
  	}
a433658c3   KOSAKI Motohiro   vmscan,memcg: mem...
5236
5237
  	if (mc.to)
  		mem_cgroup_clear_mc();
67e465a77   Balbir Singh   Memory controller...
5238
  }
5cfb80a73   Daisuke Nishimura   memcg: disable mo...
5239
5240
5241
  #else	/* !CONFIG_MMU */
  static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
  				struct cgroup *cgroup,
f780bdb7c   Ben Blum   cgroups: add per-...
5242
  				struct task_struct *p)
5cfb80a73   Daisuke Nishimura   memcg: disable mo...
5243
5244
5245
5246
5247
  {
  	return 0;
  }
  static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
  				struct cgroup *cgroup,
f780bdb7c   Ben Blum   cgroups: add per-...
5248
  				struct task_struct *p)
5cfb80a73   Daisuke Nishimura   memcg: disable mo...
5249
5250
5251
5252
5253
  {
  }
  static void mem_cgroup_move_task(struct cgroup_subsys *ss,
  				struct cgroup *cont,
  				struct cgroup *old_cont,
f780bdb7c   Ben Blum   cgroups: add per-...
5254
  				struct task_struct *p)
5cfb80a73   Daisuke Nishimura   memcg: disable mo...
5255
5256
5257
  {
  }
  #endif
67e465a77   Balbir Singh   Memory controller...
5258

8cdea7c05   Balbir Singh   Memory controller...
5259
5260
5261
5262
  struct cgroup_subsys mem_cgroup_subsys = {
  	.name = "memory",
  	.subsys_id = mem_cgroup_subsys_id,
  	.create = mem_cgroup_create,
df878fb04   KAMEZAWA Hiroyuki   memory cgroup enh...
5263
  	.pre_destroy = mem_cgroup_pre_destroy,
8cdea7c05   Balbir Singh   Memory controller...
5264
5265
  	.destroy = mem_cgroup_destroy,
  	.populate = mem_cgroup_populate,
7dc74be03   Daisuke Nishimura   memcg: add interf...
5266
5267
  	.can_attach = mem_cgroup_can_attach,
  	.cancel_attach = mem_cgroup_cancel_attach,
67e465a77   Balbir Singh   Memory controller...
5268
  	.attach = mem_cgroup_move_task,
6d12e2d8d   KAMEZAWA Hiroyuki   per-zone and recl...
5269
  	.early_init = 0,
04046e1a0   KAMEZAWA Hiroyuki   memcg: use CSS ID
5270
  	.use_id = 1,
8cdea7c05   Balbir Singh   Memory controller...
5271
  };
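  /*
   * cgroup core invokes these callbacks in order during task migration:
   * can_attach() (precharge for move-charge), then either attach()
   * == mem_cgroup_move_task on success or cancel_attach() on failure;
   * pre_destroy()/destroy() handle cgroup removal and populate() creates
   * the control files defined above.
   */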
c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
5272
5273
  
  #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
a42c390cf   Michal Hocko   cgroups: make swa...
5274
5275
5276
  static int __init enable_swap_account(char *s)
  {
  	/* consider enabled if no parameter or 1 is given */
a2c8990ae   Michal Hocko   memsw: remove nos...
5277
  	if (!strcmp(s, "1"))
a42c390cf   Michal Hocko   cgroups: make swa...
5278
  		really_do_swap_account = 1;
a2c8990ae   Michal Hocko   memsw: remove nos...
5279
  	else if (!strcmp(s, "0"))
a42c390cf   Michal Hocko   cgroups: make swa...
5280
5281
5282
  		really_do_swap_account = 0;
  	return 1;
  }
a2c8990ae   Michal Hocko   memsw: remove nos...
5283
  __setup("swapaccount=", enable_swap_account);
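  /*
   * Boot-time control: "swapaccount=0" on the kernel command line disables
   * mem+swap accounting and "swapaccount=1" (or no value, per the comment
   * above) leaves it enabled; enable_swap_cgroup() then only sets
   * do_swap_account when really_do_swap_account survived as 1.
   */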
c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
5284

c077719be   KAMEZAWA Hiroyuki   memcg: mem+swap c...
5285
  #endif