mm/list_lru.c

  /*
   * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
   * Authors: David Chinner and Glauber Costa
   *
   * Generic LRU infrastructure
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/list_lru.h>
  #include <linux/slab.h>
  #include <linux/mutex.h>
  #include <linux/memcontrol.h>

  #ifdef CONFIG_MEMCG_KMEM
  static LIST_HEAD(list_lrus);
  static DEFINE_MUTEX(list_lrus_mutex);
  
  static void list_lru_register(struct list_lru *lru)
  {
  	mutex_lock(&list_lrus_mutex);
  	list_add(&lru->list, &list_lrus);
  	mutex_unlock(&list_lrus_mutex);
  }
  
  static void list_lru_unregister(struct list_lru *lru)
  {
  	mutex_lock(&list_lrus_mutex);
  	list_del(&lru->list);
  	mutex_unlock(&list_lrus_mutex);
  }

  static int lru_shrinker_id(struct list_lru *lru)
  {
  	return lru->shrinker_id;
  }

  static inline bool list_lru_memcg_aware(struct list_lru *lru)
  {
  	/*
  	 * This needs node 0 to be always present, even
  	 * in the systems supporting sparse numa ids.
  	 */
  	return !!lru->node[0].memcg_lrus;
  }
  
  static inline struct list_lru_one *
  list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
  {
  	struct list_lru_memcg *memcg_lrus;
  	/*
  	 * Either lock or RCU protects the array of per cgroup lists
  	 * from relocation (see memcg_update_list_lru_node).
  	 */
  	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
  					   lockdep_is_held(&nlru->lock));
  	if (memcg_lrus && idx >= 0)
  		return memcg_lrus->lru[idx];
  	return &nlru->lru;
  }

  static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
  {
  	struct page *page;
  
  	if (!memcg_kmem_enabled())
  		return NULL;
  	page = virt_to_head_page(ptr);
  	return page->mem_cgroup;
  }

  static inline struct list_lru_one *
  list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
  		   struct mem_cgroup **memcg_ptr)
  {
  	struct list_lru_one *l = &nlru->lru;
  	struct mem_cgroup *memcg = NULL;

  	if (!nlru->memcg_lrus)
  		goto out;

  	memcg = mem_cgroup_from_kmem(ptr);
  	if (!memcg)
  		goto out;

  	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
  out:
  	if (memcg_ptr)
  		*memcg_ptr = memcg;
  	return l;
  }
  #else
  static void list_lru_register(struct list_lru *lru)
  {
  }

  static void list_lru_unregister(struct list_lru *lru)
  {
  }

  static int lru_shrinker_id(struct list_lru *lru)
  {
  	return -1;
  }

  static inline bool list_lru_memcg_aware(struct list_lru *lru)
  {
  	return false;
  }

  static inline struct list_lru_one *
  list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
  {
  	return &nlru->lru;
  }

  static inline struct list_lru_one *
  list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
  		   struct mem_cgroup **memcg_ptr)
  {
  	if (memcg_ptr)
  		*memcg_ptr = NULL;
  	return &nlru->lru;
  }
  #endif /* CONFIG_MEMCG_KMEM */
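
  /*
   * The layout behind these helpers: a list_lru is split into one
   * list_lru_node per NUMA node, and a memcg-aware LRU additionally keeps
   * one list_lru_one per memcg in nlru->memcg_lrus. Ignoring the RCU and
   * locking rules handled by list_lru_from_memcg_idx(), the lookup for an
   * item on node "nid" with memcg cache id "idx" boils down to:
   *
   *	struct list_lru_node *nlru = &lru->node[nid];
   *	struct list_lru_one *l = (nlru->memcg_lrus && idx >= 0) ?
   *				 nlru->memcg_lrus->lru[idx] : &nlru->lru;
   *
   * list_lru_from_kmem() derives "idx" from the object's page via
   * mem_cgroup_from_kmem() and memcg_cache_id().
   */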

  bool list_lru_add(struct list_lru *lru, struct list_head *item)
  {
  	int nid = page_to_nid(virt_to_page(item));
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct mem_cgroup *memcg;
  	struct list_lru_one *l;

  	spin_lock(&nlru->lock);
  	if (list_empty(item)) {
  		l = list_lru_from_kmem(nlru, item, &memcg);
  		list_add_tail(item, &l->list);
  		/* Set shrinker bit if the first element was added */
  		if (!l->nr_items++)
  			memcg_set_shrinker_bit(memcg, nid,
  					       lru_shrinker_id(lru));
  		nlru->nr_items++;
  		spin_unlock(&nlru->lock);
  		return true;
  	}
  	spin_unlock(&nlru->lock);
  	return false;
  }
  EXPORT_SYMBOL_GPL(list_lru_add);

  bool list_lru_del(struct list_lru *lru, struct list_head *item)
  {
  	int nid = page_to_nid(virt_to_page(item));
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct list_lru_one *l;

  	spin_lock(&nlru->lock);
  	if (!list_empty(item)) {
  		l = list_lru_from_kmem(nlru, item, NULL);
  		list_del_init(item);
  		l->nr_items--;
  		nlru->nr_items--;
  		spin_unlock(&nlru->lock);
  		return true;
  	}
  	spin_unlock(&nlru->lock);
  	return false;
  }
  EXPORT_SYMBOL_GPL(list_lru_del);
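
  /*
   * Typical use of list_lru_add()/list_lru_del(): a cache puts an object on
   * the LRU when its last reference goes away and takes it back off when the
   * object is reused, relying on list_empty() of the embedded list_head to
   * tell whether the object is currently on a list. A minimal sketch with a
   * hypothetical object type and LRU ("my_object"/"my_lru" are placeholders):
   *
   *	struct my_object {
   *		atomic_t refcount;
   *		struct list_head lru;		(linked into a struct list_lru)
   *	};
   *
   *	list_lru_add(&my_lru, &obj->lru);	(object became unused)
   *	list_lru_del(&my_lru, &obj->lru);	(object is in use again)
   *
   * Both return false when the item was already in the requested state,
   * which callers may use for statistics; see d_lru_add()/d_lru_del() in
   * fs/dcache.c for a real user of this pattern.
   */
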
  void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
  {
  	list_del_init(item);
  	list->nr_items--;
  }
  EXPORT_SYMBOL_GPL(list_lru_isolate);
  
  void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
  			   struct list_head *head)
  {
  	list_move(item, head);
  	list->nr_items--;
  }
  EXPORT_SYMBOL_GPL(list_lru_isolate_move);
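
  /*
   * list_lru_isolate() and list_lru_isolate_move() are meant to be called
   * from a list_lru_walk_cb while the per-node lock is held. A minimal
   * callback sketch, assuming the hypothetical my_object from the note
   * above and a caller-supplied dispose list:
   *
   *	static enum lru_status my_isolate(struct list_head *item,
   *					  struct list_lru_one *list,
   *					  spinlock_t *lock, void *cb_arg)
   *	{
   *		struct list_head *dispose = cb_arg;
   *		struct my_object *obj =
   *			container_of(item, struct my_object, lru);
   *
   *		if (atomic_read(&obj->refcount))
   *			return LRU_ROTATE;	(busy: give it another round)
   *
   *		list_lru_isolate_move(list, item, dispose);
   *		return LRU_REMOVED;
   *	}
   *
   * Items collected on the dispose list are freed by the caller after the
   * walk, outside the LRU lock.
   */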

  unsigned long list_lru_count_one(struct list_lru *lru,
  				 int nid, struct mem_cgroup *memcg)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct list_lru_one *l;
  	unsigned long count;

  	rcu_read_lock();
  	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
  	count = l->nr_items;
  	rcu_read_unlock();

  	return count;
  }
  EXPORT_SYMBOL_GPL(list_lru_count_one);

  unsigned long list_lru_count_node(struct list_lru *lru, int nid)
  {
  	struct list_lru_node *nlru;

  	nlru = &lru->node[nid];
  	return nlru->nr_items;
  }
  EXPORT_SYMBOL_GPL(list_lru_count_node);
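
  /*
   * These counters typically back a shrinker's ->count_objects(). For a
   * memcg- and NUMA-aware shrinker the usual route is the
   * list_lru_shrink_count() wrapper from <linux/list_lru.h>, which takes
   * the node and memcg from the shrink_control ("my_lru"/"my_count" below
   * are placeholders):
   *
   *	static unsigned long my_count(struct shrinker *shrink,
   *				      struct shrink_control *sc)
   *	{
   *		return list_lru_shrink_count(&my_lru, sc);
   *	}
   *
   * which amounts to list_lru_count_one(&my_lru, sc->nid, sc->memcg).
   */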

  static unsigned long
  __list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
  		    list_lru_walk_cb isolate, void *cb_arg,
  		    unsigned long *nr_to_walk)
  {
  	struct list_lru_one *l;
  	struct list_head *item, *n;
  	unsigned long isolated = 0;

  	l = list_lru_from_memcg_idx(nlru, memcg_idx);
  restart:
  	list_for_each_safe(item, n, &l->list) {
  		enum lru_status ret;

  		/*
  		 * decrement nr_to_walk first so that we don't livelock if we
  		 * get stuck on large numbers of LRU_RETRY items
  		 */
  		if (!*nr_to_walk)
  			break;
  		--*nr_to_walk;

  		ret = isolate(item, l, &nlru->lock, cb_arg);
  		switch (ret) {
  		case LRU_REMOVED_RETRY:
  			assert_spin_locked(&nlru->lock);
  			/* fall through */
  		case LRU_REMOVED:
  			isolated++;
  			nlru->nr_items--;
  			/*
  			 * If the lru lock has been dropped, our list
  			 * traversal is now invalid and so we have to
  			 * restart from scratch.
  			 */
  			if (ret == LRU_REMOVED_RETRY)
  				goto restart;
  			break;
  		case LRU_ROTATE:
  			list_move_tail(item, &l->list);
  			break;
  		case LRU_SKIP:
  			break;
  		case LRU_RETRY:
  			/*
  			 * The lru lock has been dropped, our list traversal is
  			 * now invalid and so we have to restart from scratch.
  			 */
  			assert_spin_locked(&nlru->lock);
  			goto restart;
  		default:
  			BUG();
  		}
  	}
  	return isolated;
  }

  unsigned long
  list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
  		  list_lru_walk_cb isolate, void *cb_arg,
  		  unsigned long *nr_to_walk)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	unsigned long ret;

  	spin_lock(&nlru->lock);
  	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
  				  nr_to_walk);
  	spin_unlock(&nlru->lock);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_one);
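
  /*
   * list_lru_walk_one() is normally reached from a shrinker's
   * ->scan_objects() through the list_lru_shrink_walk() wrapper in
   * <linux/list_lru.h>, which passes sc->nid, sc->memcg and sc->nr_to_scan
   * along. A sketch pairing it with the hypothetical my_isolate() callback
   * shown earlier:
   *
   *	static unsigned long my_scan(struct shrinker *shrink,
   *				     struct shrink_control *sc)
   *	{
   *		LIST_HEAD(dispose);
   *		unsigned long freed;
   *
   *		freed = list_lru_shrink_walk(&my_lru, sc, my_isolate, &dispose);
   *		(free everything collected on the dispose list here)
   *		return freed;
   *	}
   *
   * The per-node lock is held for the whole walk, so the callback must not
   * sleep; anything that can sleep is deferred to the dispose list.
   */
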
  unsigned long
  list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
  		      list_lru_walk_cb isolate, void *cb_arg,
  		      unsigned long *nr_to_walk)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	unsigned long ret;
  
  	spin_lock_irq(&nlru->lock);
  	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
  				  nr_to_walk);
  	spin_unlock_irq(&nlru->lock);
  	return ret;
  }

  unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
  				 list_lru_walk_cb isolate, void *cb_arg,
  				 unsigned long *nr_to_walk)
  {
  	long isolated = 0;
  	int memcg_idx;

  	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
  				      nr_to_walk);
  	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
  		for_each_memcg_cache_index(memcg_idx) {
  			struct list_lru_node *nlru = &lru->node[nid];

  			spin_lock(&nlru->lock);
  			isolated += __list_lru_walk_one(nlru, memcg_idx,
  							isolate, cb_arg,
  							nr_to_walk);
  			spin_unlock(&nlru->lock);

  			if (*nr_to_walk <= 0)
  				break;
  		}
  	}
  	return isolated;
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_node);

  static void init_one_lru(struct list_lru_one *l)
  {
  	INIT_LIST_HEAD(&l->list);
  	l->nr_items = 0;
  }

  #ifdef CONFIG_MEMCG_KMEM
  static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
  					  int begin, int end)
  {
  	int i;
  
  	for (i = begin; i < end; i++)
  		kfree(memcg_lrus->lru[i]);
  }
  
  static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
  				      int begin, int end)
  {
  	int i;
  
  	for (i = begin; i < end; i++) {
  		struct list_lru_one *l;
  
  		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
  		if (!l)
  			goto fail;
  
  		init_one_lru(l);
  		memcg_lrus->lru[i] = l;
  	}
  	return 0;
  fail:
  	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
  	return -ENOMEM;
  }
  
  static int memcg_init_list_lru_node(struct list_lru_node *nlru)
  {
  	struct list_lru_memcg *memcg_lrus;
  	int size = memcg_nr_cache_ids;

  	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
  			      size * sizeof(void *), GFP_KERNEL);
  	if (!memcg_lrus)
  		return -ENOMEM;

  	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
  		kvfree(memcg_lrus);
  		return -ENOMEM;
  	}
  	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

  	return 0;
  }
  
  static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
  {
  	struct list_lru_memcg *memcg_lrus;
  	/*
  	 * This is called when shrinker has already been unregistered,
  	 * and nobody can use it. So, there is no need to use kvfree_rcu().
  	 */
  	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
  	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
  	kvfree(memcg_lrus);
  }
  
  static void kvfree_rcu(struct rcu_head *head)
  {
  	struct list_lru_memcg *mlru;
  
  	mlru = container_of(head, struct list_lru_memcg, rcu);
  	kvfree(mlru);
  }
  
  static int memcg_update_list_lru_node(struct list_lru_node *nlru,
  				      int old_size, int new_size)
  {
  	struct list_lru_memcg *old, *new;
  
  	BUG_ON(old_size > new_size);

  	old = rcu_dereference_protected(nlru->memcg_lrus,
  					lockdep_is_held(&list_lrus_mutex));
  	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
  	if (!new)
  		return -ENOMEM;

  	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
  		kvfree(new);
  		return -ENOMEM;
  	}

  	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

  	/*
  	 * The locking below allows readers that hold nlru->lock to avoid
  	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
  	 *
  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
  	 * we have to use IRQ-safe primitives here to avoid deadlock.
  	 */
  	spin_lock_irq(&nlru->lock);
  	rcu_assign_pointer(nlru->memcg_lrus, new);
  	spin_unlock_irq(&nlru->lock);

  	call_rcu(&old->rcu, kvfree_rcu);
  	return 0;
  }
  
  static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
  					      int old_size, int new_size)
  {
  	struct list_lru_memcg *memcg_lrus;

  	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
  					       lockdep_is_held(&list_lrus_mutex));
  	/* do not bother shrinking the array back to the old size, because we
  	 * cannot handle allocation failures here */
  	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
  }

  static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
  {
  	int i;

  	if (!memcg_aware)
  		return 0;
  
  	for_each_node(i) {
  		if (memcg_init_list_lru_node(&lru->node[i]))
  			goto fail;
  	}
  	return 0;
  fail:
  	for (i = i - 1; i >= 0; i--) {
  		if (!lru->node[i].memcg_lrus)
  			continue;
  		memcg_destroy_list_lru_node(&lru->node[i]);
  	}
  	return -ENOMEM;
  }
  
  static void memcg_destroy_list_lru(struct list_lru *lru)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;

  	for_each_node(i)
  		memcg_destroy_list_lru_node(&lru->node[i]);
  }
  
  static int memcg_update_list_lru(struct list_lru *lru,
  				 int old_size, int new_size)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return 0;

  	for_each_node(i) {
  		if (memcg_update_list_lru_node(&lru->node[i],
  					       old_size, new_size))
  			goto fail;
  	}
  	return 0;
  fail:
  	for (i = i - 1; i >= 0; i--) {
  		if (!lru->node[i].memcg_lrus)
  			continue;
  		memcg_cancel_update_list_lru_node(&lru->node[i],
  						  old_size, new_size);
  	}
  	return -ENOMEM;
  }
  
  static void memcg_cancel_update_list_lru(struct list_lru *lru,
  					 int old_size, int new_size)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;

  	for_each_node(i)
  		memcg_cancel_update_list_lru_node(&lru->node[i],
  						  old_size, new_size);
  }
  
  int memcg_update_all_list_lrus(int new_size)
  {
  	int ret = 0;
  	struct list_lru *lru;
  	int old_size = memcg_nr_cache_ids;
  
  	mutex_lock(&list_lrus_mutex);
  	list_for_each_entry(lru, &list_lrus, list) {
  		ret = memcg_update_list_lru(lru, old_size, new_size);
  		if (ret)
  			goto fail;
  	}
  out:
  	mutex_unlock(&list_lrus_mutex);
  	return ret;
  fail:
  	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
  		memcg_cancel_update_list_lru(lru, old_size, new_size);
  	goto out;
  }
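
  /*
   * memcg_update_all_list_lrus() is called from the memcg side, roughly when
   * memcg_alloc_cache_id() in mm/memcontrol.c hands out a kmemcg id that no
   * longer fits in the current memcg_nr_cache_ids, so that every registered
   * memcg-aware LRU grows its per-node arrays before the new id is used.
   * The whole resize runs under list_lrus_mutex and is rolled back through
   * memcg_cancel_update_list_lru() if any allocation fails.
   */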

  static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
  				      int src_idx, struct mem_cgroup *dst_memcg)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	int dst_idx = dst_memcg->kmemcg_id;
  	struct list_lru_one *src, *dst;
  	bool set;
  
  	/*
  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
  	 * we have to use IRQ-safe primitives here to avoid deadlock.
  	 */
  	spin_lock_irq(&nlru->lock);
  
  	src = list_lru_from_memcg_idx(nlru, src_idx);
  	dst = list_lru_from_memcg_idx(nlru, dst_idx);
  
  	list_splice_init(&src->list, &dst->list);
  	set = (!dst->nr_items && src->nr_items);
  	dst->nr_items += src->nr_items;
  	if (set)
  		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
  	src->nr_items = 0;
  
  	spin_unlock_irq(&nlru->lock);
  }
  
  static void memcg_drain_list_lru(struct list_lru *lru,
  				 int src_idx, struct mem_cgroup *dst_memcg)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;

  	for_each_node(i)
  		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
  }

  void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
  {
  	struct list_lru *lru;
  
  	mutex_lock(&list_lrus_mutex);
  	list_for_each_entry(lru, &list_lrus, list)
  		memcg_drain_list_lru(lru, src_idx, dst_memcg);
  	mutex_unlock(&list_lrus_mutex);
  }
  #else
  static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
  {
  	return 0;
  }
  
  static void memcg_destroy_list_lru(struct list_lru *lru)
  {
  }
  #endif /* CONFIG_MEMCG_KMEM */
  
  int __list_lru_init(struct list_lru *lru, bool memcg_aware,
  		    struct lock_class_key *key, struct shrinker *shrinker)
  {
  	int i;
  	size_t size = sizeof(*lru->node) * nr_node_ids;
  	int err = -ENOMEM;

  #ifdef CONFIG_MEMCG_KMEM
  	if (shrinker)
  		lru->shrinker_id = shrinker->id;
  	else
  		lru->shrinker_id = -1;
  #endif
  	memcg_get_cache_ids();

  	lru->node = kzalloc(size, GFP_KERNEL);
  	if (!lru->node)
  		goto out;

  	for_each_node(i) {
  		spin_lock_init(&lru->node[i].lock);
  		if (key)
  			lockdep_set_class(&lru->node[i].lock, key);
  		init_one_lru(&lru->node[i].lru);
  	}

  	err = memcg_init_list_lru(lru, memcg_aware);
  	if (err) {
  		kfree(lru->node);
  		/* Do this so a list_lru_destroy() doesn't crash: */
  		lru->node = NULL;
  		goto out;
  	}

  	list_lru_register(lru);
  out:
  	memcg_put_cache_ids();
  	return err;
  }
  EXPORT_SYMBOL_GPL(__list_lru_init);

  void list_lru_destroy(struct list_lru *lru)
  {
  	/* Already destroyed or not yet initialized? */
  	if (!lru->node)
  		return;

  	memcg_get_cache_ids();
  	list_lru_unregister(lru);

  	memcg_destroy_list_lru(lru);
  	kfree(lru->node);
  	lru->node = NULL;

  #ifdef CONFIG_MEMCG_KMEM
  	lru->shrinker_id = -1;
  #endif
  	memcg_put_cache_ids();
  }
  EXPORT_SYMBOL_GPL(list_lru_destroy);
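
  /*
   * Lifecycle sketch for a memcg-aware user (all "my_*" names are
   * placeholders): the shrinker is usually preallocated first so that its
   * id can be stored in the LRU, the LRU is then set up with
   * list_lru_init_memcg() and destroyed only after the shrinker has been
   * unregistered; see alloc_super() and deactivate_locked_super() in
   * fs/super.c for a real example of this ordering.
   *
   *	static struct shrinker my_shrinker = {
   *		.count_objects	= my_count,
   *		.scan_objects	= my_scan,
   *		.seeks		= DEFAULT_SEEKS,
   *		.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
   *	};
   *	static struct list_lru my_lru;
   *
   *	err = prealloc_shrinker(&my_shrinker);
   *	if (!err)
   *		err = list_lru_init_memcg(&my_lru, &my_shrinker);
   *	if (!err)
   *		register_shrinker_prepared(&my_shrinker);
   *	...
   *	unregister_shrinker(&my_shrinker);
   *	list_lru_destroy(&my_lru);
   */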