mm/list_lru.c 12.6 KB
  /*
   * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
   * Authors: David Chinner and Glauber Costa
   *
   * Generic LRU infrastructure
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/list_lru.h>
  #include <linux/slab.h>
  #include <linux/mutex.h>
  #include <linux/memcontrol.h>

  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
  static LIST_HEAD(list_lrus);
  static DEFINE_MUTEX(list_lrus_mutex);
  
  static void list_lru_register(struct list_lru *lru)
  {
  	mutex_lock(&list_lrus_mutex);
  	list_add(&lru->list, &list_lrus);
  	mutex_unlock(&list_lrus_mutex);
  }
  
  static void list_lru_unregister(struct list_lru *lru)
  {
  	mutex_lock(&list_lrus_mutex);
  	list_del(&lru->list);
  	mutex_unlock(&list_lrus_mutex);
  }
  #else
  static void list_lru_register(struct list_lru *lru)
  {
  }
  
  static void list_lru_unregister(struct list_lru *lru)
  {
  }
  #endif /* CONFIG_MEMCG && !CONFIG_SLOB */

  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
  static inline bool list_lru_memcg_aware(struct list_lru *lru)
  {
  	/*
  	 * This needs node 0 to always be present, even
  	 * on systems supporting sparse NUMA ids.
  	 */
  	return !!lru->node[0].memcg_lrus;
  }
  
  static inline struct list_lru_one *
  list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
  {
  	/*
  	 * The lock protects the array of per cgroup lists from relocation
  	 * (see memcg_update_list_lru_node).
  	 */
  	lockdep_assert_held(&nlru->lock);
  	if (nlru->memcg_lrus && idx >= 0)
  		return nlru->memcg_lrus->lru[idx];
  
  	return &nlru->lru;
  }
  static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
  {
  	struct page *page;
  
  	if (!memcg_kmem_enabled())
  		return NULL;
  	page = virt_to_head_page(ptr);
  	return page->mem_cgroup;
  }
  static inline struct list_lru_one *
  list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
  {
  	struct mem_cgroup *memcg;
  
  	if (!nlru->memcg_lrus)
  		return &nlru->lru;
  
  	memcg = mem_cgroup_from_kmem(ptr);
  	if (!memcg)
  		return &nlru->lru;
  
  	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
  }
  #else
  static inline bool list_lru_memcg_aware(struct list_lru *lru)
  {
  	return false;
  }
  
  static inline struct list_lru_one *
  list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
  {
  	return &nlru->lru;
  }
  
  static inline struct list_lru_one *
  list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
  {
  	return &nlru->lru;
  }
  #endif /* CONFIG_MEMCG && !CONFIG_SLOB */

  bool list_lru_add(struct list_lru *lru, struct list_head *item)
  {
  	int nid = page_to_nid(virt_to_page(item));
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct list_lru_one *l;
  
  	spin_lock(&nlru->lock);
  	if (list_empty(item)) {
  		l = list_lru_from_kmem(nlru, item);
  		list_add_tail(item, &l->list);
  		l->nr_items++;
  		nlru->nr_items++;
  		spin_unlock(&nlru->lock);
  		return true;
  	}
  	spin_unlock(&nlru->lock);
  	return false;
  }
  EXPORT_SYMBOL_GPL(list_lru_add);
  
  bool list_lru_del(struct list_lru *lru, struct list_head *item)
  {
  	int nid = page_to_nid(virt_to_page(item));
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct list_lru_one *l;
  
  	spin_lock(&nlru->lock);
  	if (!list_empty(item)) {
  		l = list_lru_from_kmem(nlru, item);
  		list_del_init(item);
  		l->nr_items--;
  		nlru->nr_items--;
  		spin_unlock(&nlru->lock);
  		return true;
  	}
  	spin_unlock(&nlru->lock);
  	return false;
  }
  EXPORT_SYMBOL_GPL(list_lru_del);
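
  /*
   * Illustrative sketch (editorial addition, not part of the original file):
   * how a typical cache feeds objects into a list_lru.  The names foo_object,
   * foo_lru and the two helpers are hypothetical.  The real requirements, per
   * list_lru_add()/list_lru_del() above, are that the embedded list_head is
   * initialized empty and that the object comes from slab/page memory so that
   * virt_to_page() can resolve its node (and, if enabled, its memcg).
   */
  #if 0	/* example only, compiled out */
  struct foo_object {
  	struct list_head	lru;		/* INIT_LIST_HEAD() at alloc time */
  	atomic_t		refcount;
  };

  static struct list_lru foo_lru;	/* set up via list_lru_init()/__list_lru_init() below */

  /* Last reference dropped: park the object on the per-node/per-memcg LRU. */
  static void foo_object_park(struct foo_object *obj)
  {
  	/* returns false if the item was already on an LRU list */
  	list_lru_add(&foo_lru, &obj->lru);
  }

  /* Object looked up again: pull it back off the LRU before reuse. */
  static void foo_object_reuse(struct foo_object *obj)
  {
  	/* returns false if the item was not on an LRU list */
  	list_lru_del(&foo_lru, &obj->lru);
  }
  #endif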
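
  /*
   * Note: list_lru_isolate() and list_lru_isolate_move() below are intended to
   * be called from a list_lru_walk_cb callback with the per-node lru lock
   * held.  They only adjust the per-list nr_items; the walker itself updates
   * the per-node nr_items for LRU_REMOVED entries (see __list_lru_walk_one()).
   */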
  void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
  {
  	list_del_init(item);
  	list->nr_items--;
  }
  EXPORT_SYMBOL_GPL(list_lru_isolate);
  
  void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
  			   struct list_head *head)
  {
  	list_move(item, head);
  	list->nr_items--;
  }
  EXPORT_SYMBOL_GPL(list_lru_isolate_move);
  static unsigned long __list_lru_count_one(struct list_lru *lru,
  					  int nid, int memcg_idx)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct list_lru_one *l;
  	unsigned long count;

  	spin_lock(&nlru->lock);
  	l = list_lru_from_memcg_idx(nlru, memcg_idx);
  	count = l->nr_items;
  	spin_unlock(&nlru->lock);
  
  	return count;
  }
  
  unsigned long list_lru_count_one(struct list_lru *lru,
  				 int nid, struct mem_cgroup *memcg)
  {
  	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
  }
  EXPORT_SYMBOL_GPL(list_lru_count_one);
  
  unsigned long list_lru_count_node(struct list_lru *lru, int nid)
  {
  	struct list_lru_node *nlru;

  	nlru = &lru->node[nid];
  	return nlru->nr_items;
  }
  EXPORT_SYMBOL_GPL(list_lru_count_node);

  static unsigned long
  __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
  		    list_lru_walk_cb isolate, void *cb_arg,
  		    unsigned long *nr_to_walk)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct list_lru_one *l;
  	struct list_head *item, *n;
  	unsigned long isolated = 0;

  	spin_lock(&nlru->lock);
  	l = list_lru_from_memcg_idx(nlru, memcg_idx);
  restart:
  	list_for_each_safe(item, n, &l->list) {
  		enum lru_status ret;
  
  		/*
  		 * decrement nr_to_walk first so that we don't livelock if we
  		 * get stuck on large numbers of LRU_RETRY items
  		 */
  		if (!*nr_to_walk)
  			break;
  		--*nr_to_walk;

  		ret = isolate(item, l, &nlru->lock, cb_arg);
  		switch (ret) {
  		case LRU_REMOVED_RETRY:
  			assert_spin_locked(&nlru->lock);
  		case LRU_REMOVED:
  			isolated++;
  			nlru->nr_items--;
  			/*
  			 * If the lru lock has been dropped, our list
  			 * traversal is now invalid and so we have to
  			 * restart from scratch.
  			 */
  			if (ret == LRU_REMOVED_RETRY)
  				goto restart;
  			break;
  		case LRU_ROTATE:
  			list_move_tail(item, &l->list);
  			break;
  		case LRU_SKIP:
  			break;
  		case LRU_RETRY:
  			/*
  			 * The lru lock has been dropped, our list traversal is
  			 * now invalid and so we have to restart from scratch.
  			 */
  			assert_spin_locked(&nlru->lock);
  			goto restart;
  		default:
  			BUG();
  		}
  	}
  
  	spin_unlock(&nlru->lock);
  	return isolated;
  }
  
  unsigned long
  list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
  		  list_lru_walk_cb isolate, void *cb_arg,
  		  unsigned long *nr_to_walk)
  {
  	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
  				   isolate, cb_arg, nr_to_walk);
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_one);
  
  unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
  				 list_lru_walk_cb isolate, void *cb_arg,
  				 unsigned long *nr_to_walk)
  {
  	long isolated = 0;
  	int memcg_idx;
  
  	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
  					nr_to_walk);
  	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
  		for_each_memcg_cache_index(memcg_idx) {
  			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
  						isolate, cb_arg, nr_to_walk);
  			if (*nr_to_walk <= 0)
  				break;
  		}
  	}
  	return isolated;
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_node);
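
  /*
   * Illustrative sketch (editorial addition, not part of the original file):
   * the usual consumer of the walk API is a shrinker, continuing the
   * hypothetical foo_object/foo_lru example above.  The callback signature and
   * the LRU_* return codes follow the switch in __list_lru_walk_one();
   * list_lru_shrink_count()/list_lru_shrink_walk() are the <linux/list_lru.h>
   * wrappers that pass the shrink_control's nid, memcg and nr_to_scan through
   * to list_lru_count_one() and list_lru_walk_one().
   */
  #if 0	/* example only, compiled out */
  static enum lru_status foo_isolate(struct list_head *item,
  				   struct list_lru_one *list,
  				   spinlock_t *lock, void *cb_arg)
  {
  	struct foo_object *obj = container_of(item, struct foo_object, lru);
  	struct list_head *freeable = cb_arg;

  	/* still in use: leave it on the list and keep walking */
  	if (atomic_read(&obj->refcount))
  		return LRU_SKIP;

  	/* unhook it under the lru lock and collect it for freeing later */
  	list_lru_isolate_move(list, item, freeable);
  	return LRU_REMOVED;
  }

  static unsigned long foo_count_objects(struct shrinker *shrink,
  				       struct shrink_control *sc)
  {
  	return list_lru_shrink_count(&foo_lru, sc);
  }

  static unsigned long foo_scan_objects(struct shrinker *shrink,
  				      struct shrink_control *sc)
  {
  	LIST_HEAD(freeable);
  	unsigned long freed;

  	freed = list_lru_shrink_walk(&foo_lru, sc, foo_isolate, &freeable);
  	/* free everything collected on &freeable outside the lru lock */
  	return freed;
  }
  #endif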
  static void init_one_lru(struct list_lru_one *l)
  {
  	INIT_LIST_HEAD(&l->list);
  	l->nr_items = 0;
  }
  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
  static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
  					  int begin, int end)
  {
  	int i;
  
  	for (i = begin; i < end; i++)
  		kfree(memcg_lrus->lru[i]);
  }
  
  static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
  				      int begin, int end)
  {
  	int i;
  
  	for (i = begin; i < end; i++) {
  		struct list_lru_one *l;
  
  		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
  		if (!l)
  			goto fail;
  
  		init_one_lru(l);
  		memcg_lrus->lru[i] = l;
  	}
  	return 0;
  fail:
  	/* only entries in [begin, i) were allocated; free exactly those */
  	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
  	return -ENOMEM;
  }
  
  static int memcg_init_list_lru_node(struct list_lru_node *nlru)
  {
  	int size = memcg_nr_cache_ids;
  	nlru->memcg_lrus = kvmalloc(size * sizeof(void *), GFP_KERNEL);
  	if (!nlru->memcg_lrus)
  		return -ENOMEM;
  
  	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
  		kvfree(nlru->memcg_lrus);
  		return -ENOMEM;
  	}
  
  	return 0;
  }
  
  static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
  {
  	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
  	kvfree(nlru->memcg_lrus);
  }
  
  static int memcg_update_list_lru_node(struct list_lru_node *nlru,
  				      int old_size, int new_size)
  {
  	struct list_lru_memcg *old, *new;
  
  	BUG_ON(old_size > new_size);
  
  	old = nlru->memcg_lrus;
  	new = kvmalloc(new_size * sizeof(void *), GFP_KERNEL);
  	if (!new)
  		return -ENOMEM;
  
  	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
  		kvfree(new);
  		return -ENOMEM;
  	}
  
  	memcpy(new, old, old_size * sizeof(void *));
  
  	/*
  	 * The lock guarantees that we won't race with a reader
  	 * (see list_lru_from_memcg_idx).
  	 *
  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
  	 * we have to use IRQ-safe primitives here to avoid deadlock.
  	 */
  	spin_lock_irq(&nlru->lock);
  	nlru->memcg_lrus = new;
  	spin_unlock_irq(&nlru->lock);
  	kvfree(old);
  	return 0;
  }
  
  static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
  					      int old_size, int new_size)
  {
  	/* do not bother shrinking the array back to the old size, because we
  	 * cannot handle allocation failures here */
  	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
  }
  
  static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
  {
  	int i;
  	if (!memcg_aware)
  		return 0;
  
  	for_each_node(i) {
  		if (memcg_init_list_lru_node(&lru->node[i]))
  			goto fail;
  	}
  	return 0;
  fail:
  	for (i = i - 1; i >= 0; i--) {
  		if (!lru->node[i].memcg_lrus)
  			continue;
  		memcg_destroy_list_lru_node(&lru->node[i]);
  	}
  	return -ENOMEM;
  }
  
  static void memcg_destroy_list_lru(struct list_lru *lru)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_destroy_list_lru_node(&lru->node[i]);
  }
  
  static int memcg_update_list_lru(struct list_lru *lru,
  				 int old_size, int new_size)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return 0;
  	for_each_node(i) {
  		if (memcg_update_list_lru_node(&lru->node[i],
  					       old_size, new_size))
  			goto fail;
  	}
  	return 0;
  fail:
  	for (i = i - 1; i >= 0; i--) {
  		if (!lru->node[i].memcg_lrus)
  			continue;
  		memcg_cancel_update_list_lru_node(&lru->node[i],
  						  old_size, new_size);
  	}
  	return -ENOMEM;
  }
  
  static void memcg_cancel_update_list_lru(struct list_lru *lru,
  					 int old_size, int new_size)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_cancel_update_list_lru_node(&lru->node[i],
  						  old_size, new_size);
  }
  
  int memcg_update_all_list_lrus(int new_size)
  {
  	int ret = 0;
  	struct list_lru *lru;
  	int old_size = memcg_nr_cache_ids;
  
  	mutex_lock(&list_lrus_mutex);
  	list_for_each_entry(lru, &list_lrus, list) {
  		ret = memcg_update_list_lru(lru, old_size, new_size);
  		if (ret)
  			goto fail;
  	}
  out:
  	mutex_unlock(&list_lrus_mutex);
  	return ret;
  fail:
  	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
  		memcg_cancel_update_list_lru(lru, old_size, new_size);
  	goto out;
  }
  
  static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
  				      int src_idx, int dst_idx)
  {
  	struct list_lru_one *src, *dst;
  
  	/*
  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
  	 * we have to use IRQ-safe primitives here to avoid deadlock.
  	 */
  	spin_lock_irq(&nlru->lock);
  
  	src = list_lru_from_memcg_idx(nlru, src_idx);
  	dst = list_lru_from_memcg_idx(nlru, dst_idx);
  
  	list_splice_init(&src->list, &dst->list);
  	dst->nr_items += src->nr_items;
  	src->nr_items = 0;
  
  	spin_unlock_irq(&nlru->lock);
  }
  
  static void memcg_drain_list_lru(struct list_lru *lru,
  				 int src_idx, int dst_idx)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
  }
  
  void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
  {
  	struct list_lru *lru;
  
  	mutex_lock(&list_lrus_mutex);
  	list_for_each_entry(lru, &list_lrus, list)
  		memcg_drain_list_lru(lru, src_idx, dst_idx);
  	mutex_unlock(&list_lrus_mutex);
  }
  #else
  static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
  {
  	return 0;
  }
  
  static void memcg_destroy_list_lru(struct list_lru *lru)
  {
  }
  #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
  
  int __list_lru_init(struct list_lru *lru, bool memcg_aware,
  		    struct lock_class_key *key)
  {
  	int i;
  	size_t size = sizeof(*lru->node) * nr_node_ids;
  	int err = -ENOMEM;
  
  	memcg_get_cache_ids();
  
  	lru->node = kzalloc(size, GFP_KERNEL);
  	if (!lru->node)
  		goto out;

  	for_each_node(i) {
  		spin_lock_init(&lru->node[i].lock);
  		if (key)
  			lockdep_set_class(&lru->node[i].lock, key);
  		init_one_lru(&lru->node[i].lru);
  	}
  
  	err = memcg_init_list_lru(lru, memcg_aware);
  	if (err) {
  		kfree(lru->node);
  		/* Do this so a list_lru_destroy() doesn't crash: */
  		lru->node = NULL;
  		goto out;
  	}

  	list_lru_register(lru);
  out:
  	memcg_put_cache_ids();
  	return err;
  }
  EXPORT_SYMBOL_GPL(__list_lru_init);
  
  void list_lru_destroy(struct list_lru *lru)
  {
  	/* Already destroyed or not yet initialized? */
  	if (!lru->node)
  		return;
  
  	memcg_get_cache_ids();
  	list_lru_unregister(lru);
  
  	memcg_destroy_list_lru(lru);
  	kfree(lru->node);
  	lru->node = NULL;
  
  	memcg_put_cache_ids();
  }
  EXPORT_SYMBOL_GPL(list_lru_destroy);
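
  /*
   * Illustrative sketch (editorial addition, not part of the original file):
   * the lifetime of a list_lru as seen from the hypothetical foo module used
   * in the examples above.  list_lru_init(), list_lru_init_key() and
   * list_lru_init_memcg() are the <linux/list_lru.h> wrappers around
   * __list_lru_init() that select memcg awareness and an optional lockdep key.
   */
  #if 0	/* example only, compiled out */
  static int __init foo_init(void)
  {
  	int err;

  	/* memcg-aware; plain list_lru_init() would skip the per-memcg arrays */
  	err = list_lru_init_memcg(&foo_lru);
  	if (err)
  		return err;

  	/* register the foo shrinker, caches, etc. */
  	return 0;
  }

  static void __exit foo_exit(void)
  {
  	/*
  	 * The lru must already be empty here; list_lru_destroy() only frees
  	 * the per-node and per-memcg bookkeeping, not the items themselves.
  	 */
  	list_lru_destroy(&foo_lru);
  }
  #endif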