mm/list_lru.c

/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
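/*
 * Every list_lru is kept on this list while it is live so that
 * memcg_update_all_list_lrus() and memcg_drain_all_list_lrus() below can
 * walk all registered LRUs when the memcg cache id space is resized or a
 * cgroup's items are reparented.
 */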
  static LIST_HEAD(list_lrus);
  static DEFINE_MUTEX(list_lrus_mutex);
  
  static void list_lru_register(struct list_lru *lru)
  {
  	mutex_lock(&list_lrus_mutex);
  	list_add(&lru->list, &list_lrus);
  	mutex_unlock(&list_lrus_mutex);
  }
  
  static void list_lru_unregister(struct list_lru *lru)
  {
  	mutex_lock(&list_lrus_mutex);
  	list_del(&lru->list);
  	mutex_unlock(&list_lrus_mutex);
  }
  #else
  static void list_lru_register(struct list_lru *lru)
  {
  }
  
  static void list_lru_unregister(struct list_lru *lru)
  {
  }
  #endif /* CONFIG_MEMCG && !CONFIG_SLOB */

  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
  static inline bool list_lru_memcg_aware(struct list_lru *lru)
  {
	/*
	 * This needs node 0 to always be present, even
	 * on systems supporting sparse numa ids.
	 */
  	return !!lru->node[0].memcg_lrus;
  }
  
  static inline struct list_lru_one *
  list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
  {
  	/*
  	 * The lock protects the array of per cgroup lists from relocation
  	 * (see memcg_update_list_lru_node).
  	 */
  	lockdep_assert_held(&nlru->lock);
  	if (nlru->memcg_lrus && idx >= 0)
  		return nlru->memcg_lrus->lru[idx];
  
  	return &nlru->lru;
  }
  static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
  {
  	struct page *page;
  
  	if (!memcg_kmem_enabled())
  		return NULL;
  	page = virt_to_head_page(ptr);
  	return page->mem_cgroup;
  }
  static inline struct list_lru_one *
  list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
  {
  	struct mem_cgroup *memcg;
  
  	if (!nlru->memcg_lrus)
  		return &nlru->lru;
  
  	memcg = mem_cgroup_from_kmem(ptr);
  	if (!memcg)
  		return &nlru->lru;
  
  	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
  }
  #else
  static inline bool list_lru_memcg_aware(struct list_lru *lru)
  {
  	return false;
  }
  
  static inline struct list_lru_one *
  list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
  {
  	return &nlru->lru;
  }
  
  static inline struct list_lru_one *
  list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
  {
  	return &nlru->lru;
  }
  #endif /* CONFIG_MEMCG && !CONFIG_SLOB */

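/*
 * Add @item to the LRU for the node (and, when the lru is memcg aware, the
 * memcg) that the object was allocated on.  Returns true if the item was
 * added, false if it was already on a list.
 */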
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
  
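/*
 * Remove @item from the lru it was added to.  Returns true if the item was
 * removed, false if it was not on a list.
 */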
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
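
/*
 * Helpers for isolate callbacks: both expect the per-node lru lock to be
 * held, which is the case when they are called from a callback passed to
 * __list_lru_walk_one() below.
 */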
  void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
  {
  	list_del_init(item);
  	list->nr_items--;
  }
  EXPORT_SYMBOL_GPL(list_lru_isolate);
  
  void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
  			   struct list_head *head)
  {
  	list_move(item, head);
  	list->nr_items--;
  }
  EXPORT_SYMBOL_GPL(list_lru_isolate_move);

static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}
  
  unsigned long list_lru_count_one(struct list_lru *lru,
  				 int nid, struct mem_cgroup *memcg)
  {
  	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
  }
  EXPORT_SYMBOL_GPL(list_lru_count_one);
  
  unsigned long list_lru_count_node(struct list_lru *lru, int nid)
  {
  	long count = 0;
  	int memcg_idx;
  
  	count += __list_lru_count_one(lru, nid, -1);
  	if (list_lru_memcg_aware(lru)) {
  		for_each_memcg_cache_index(memcg_idx)
  			count += __list_lru_count_one(lru, nid, memcg_idx);
  	}
  	return count;
  }
  EXPORT_SYMBOL_GPL(list_lru_count_node);

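/*
 * Walk one per-node (and, for memcg_idx >= 0, per-memcg) list, calling
 * @isolate on each item with the node's lru lock held.  The callback's
 * return code says whether the item was removed, rotated, skipped or must
 * be retried; LRU_REMOVED_RETRY and LRU_RETRY mean the lock was dropped,
 * so the traversal restarts from the head of the list.
 */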
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
		case LRU_REMOVED:
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
  
  unsigned long
  list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
  		  list_lru_walk_cb isolate, void *cb_arg,
  		  unsigned long *nr_to_walk)
  {
  	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
  				   isolate, cb_arg, nr_to_walk);
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_one);
  
  unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
  				 list_lru_walk_cb isolate, void *cb_arg,
  				 unsigned long *nr_to_walk)
  {
  	long isolated = 0;
  	int memcg_idx;
  
  	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
  					nr_to_walk);
  	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
  		for_each_memcg_cache_index(memcg_idx) {
  			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
  						isolate, cb_arg, nr_to_walk);
  			if (*nr_to_walk <= 0)
  				break;
  		}
  	}
  	return isolated;
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_node);
  static void init_one_lru(struct list_lru_one *l)
  {
  	INIT_LIST_HEAD(&l->list);
  	l->nr_items = 0;
  }
  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
  static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
  					  int begin, int end)
  {
  	int i;
  
  	for (i = begin; i < end; i++)
  		kfree(memcg_lrus->lru[i]);
  }
  
  static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
  				      int begin, int end)
  {
  	int i;
  
  	for (i = begin; i < end; i++) {
  		struct list_lru_one *l;
  
  		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
  		if (!l)
  			goto fail;
  
  		init_one_lru(l);
  		memcg_lrus->lru[i] = l;
  	}
  	return 0;
fail:
	/* free only the entries that were actually allocated: begin .. i-1 */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
  	return -ENOMEM;
  }
  
  static int memcg_init_list_lru_node(struct list_lru_node *nlru)
  {
  	int size = memcg_nr_cache_ids;
  
  	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
  	if (!nlru->memcg_lrus)
  		return -ENOMEM;
  
  	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
  		kfree(nlru->memcg_lrus);
  		return -ENOMEM;
  	}
  
  	return 0;
  }
  
  static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
  {
  	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
  	kfree(nlru->memcg_lrus);
  }
  
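/*
 * Grow this node's per-memcg array from @old_size to @new_size entries.
 * The new array is fully initialized before it is published under the
 * node's lock, so readers never see uninitialized slots.
 */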
  static int memcg_update_list_lru_node(struct list_lru_node *nlru,
  				      int old_size, int new_size)
  {
  	struct list_lru_memcg *old, *new;
  
  	BUG_ON(old_size > new_size);
  
  	old = nlru->memcg_lrus;
  	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
  	if (!new)
  		return -ENOMEM;
  
  	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
  		kfree(new);
  		return -ENOMEM;
  	}
  
  	memcpy(new, old, old_size * sizeof(void *));
  
  	/*
  	 * The lock guarantees that we won't race with a reader
  	 * (see list_lru_from_memcg_idx).
  	 *
  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
  	 * we have to use IRQ-safe primitives here to avoid deadlock.
  	 */
  	spin_lock_irq(&nlru->lock);
  	nlru->memcg_lrus = new;
  	spin_unlock_irq(&nlru->lock);
  
  	kfree(old);
  	return 0;
  }
  
  static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
  					      int old_size, int new_size)
  {
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
  	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
  }
  
  static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
  {
  	int i;
  	if (!memcg_aware)
  		return 0;
  
  	for_each_node(i) {
  		if (memcg_init_list_lru_node(&lru->node[i]))
  			goto fail;
  	}
  	return 0;
  fail:
  	for (i = i - 1; i >= 0; i--) {
  		if (!lru->node[i].memcg_lrus)
  			continue;
  		memcg_destroy_list_lru_node(&lru->node[i]);
  	}
  	return -ENOMEM;
  }
  
  static void memcg_destroy_list_lru(struct list_lru *lru)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_destroy_list_lru_node(&lru->node[i]);
  }
  
  static int memcg_update_list_lru(struct list_lru *lru,
  				 int old_size, int new_size)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return 0;
  	for_each_node(i) {
  		if (memcg_update_list_lru_node(&lru->node[i],
  					       old_size, new_size))
  			goto fail;
  	}
  	return 0;
  fail:
  	for (i = i - 1; i >= 0; i--) {
  		if (!lru->node[i].memcg_lrus)
  			continue;
  		memcg_cancel_update_list_lru_node(&lru->node[i],
  						  old_size, new_size);
  	}
  	return -ENOMEM;
  }
  
  static void memcg_cancel_update_list_lru(struct list_lru *lru,
  					 int old_size, int new_size)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_cancel_update_list_lru_node(&lru->node[i],
  						  old_size, new_size);
  }
  
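/*
 * Called when the memcg cache id space grows: resize the per-memcg arrays
 * of every registered lru, rolling all of them back if any allocation
 * fails.
 */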
  int memcg_update_all_list_lrus(int new_size)
  {
  	int ret = 0;
  	struct list_lru *lru;
  	int old_size = memcg_nr_cache_ids;
  
  	mutex_lock(&list_lrus_mutex);
  	list_for_each_entry(lru, &list_lrus, list) {
  		ret = memcg_update_list_lru(lru, old_size, new_size);
  		if (ret)
  			goto fail;
  	}
  out:
  	mutex_unlock(&list_lrus_mutex);
  	return ret;
  fail:
  	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
  		memcg_cancel_update_list_lru(lru, old_size, new_size);
  	goto out;
  }
  
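/*
 * Move every item from the @src_idx list onto the @dst_idx list of the same
 * node, leaving the source list empty.  This is how items are reparented
 * when a cgroup goes away.
 */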
  static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
  				      int src_idx, int dst_idx)
  {
  	struct list_lru_one *src, *dst;
  
  	/*
  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
  	 * we have to use IRQ-safe primitives here to avoid deadlock.
  	 */
  	spin_lock_irq(&nlru->lock);
  
  	src = list_lru_from_memcg_idx(nlru, src_idx);
  	dst = list_lru_from_memcg_idx(nlru, dst_idx);
  
  	list_splice_init(&src->list, &dst->list);
  	dst->nr_items += src->nr_items;
  	src->nr_items = 0;
  
  	spin_unlock_irq(&nlru->lock);
  }
  
  static void memcg_drain_list_lru(struct list_lru *lru,
  				 int src_idx, int dst_idx)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
  }
  
  void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
  {
  	struct list_lru *lru;
  
  	mutex_lock(&list_lrus_mutex);
  	list_for_each_entry(lru, &list_lrus, list)
  		memcg_drain_list_lru(lru, src_idx, dst_idx);
  	mutex_unlock(&list_lrus_mutex);
  }
  #else
  static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
  {
  	return 0;
  }
  
  static void memcg_destroy_list_lru(struct list_lru *lru)
  {
  }
  #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
  
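/*
 * Allocate and initialize the per-node lists (and, when @memcg_aware, the
 * per-memcg arrays), then register the lru on the global list so it takes
 * part in future resize and drain operations.  memcg_get_cache_ids() is
 * held across initialization so the memcg cache id count cannot change
 * while the per-memcg arrays are sized.
 */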
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
  
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();
	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
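
/*
 * Editorial addition, not part of the original file: a minimal sketch of how
 * a subsystem might consume this API.  The names my_obj, my_cache_lru and the
 * helpers below are hypothetical; only the list_lru calls themselves
 * (list_lru_init, list_lru_add, list_lru_del, list_lru_walk,
 * list_lru_isolate_move, list_lru_destroy) and the isolate callback signature
 * come from this file and <linux/list_lru.h>.  Kept under #if 0 because it is
 * illustration only.
 */
#if 0
#include <linux/list_lru.h>
#include <linux/slab.h>

struct my_obj {
	struct list_head lru;		/* linkage owned by the list_lru */
	/* ... payload ... */
};

static struct list_lru my_cache_lru;	/* list_lru_init(&my_cache_lru) at setup */

/* Park an object on the LRU once it becomes unused. */
static void my_obj_park(struct my_obj *obj)
{
	list_lru_add(&my_cache_lru, &obj->lru);
}

/* Pull an object back off the LRU when it is used again. */
static bool my_obj_unpark(struct my_obj *obj)
{
	return list_lru_del(&my_cache_lru, &obj->lru);
}

/* Isolate callback: runs with the per-node lru lock held. */
static enum lru_status my_obj_isolate(struct list_head *item,
				      struct list_lru_one *list,
				      spinlock_t *lock, void *cb_arg)
{
	struct my_obj *obj = container_of(item, struct my_obj, lru);
	struct list_head *dispose = cb_arg;

	/* Move to a private list; free after the walk, with the lock dropped. */
	list_lru_isolate_move(list, &obj->lru, dispose);
	return LRU_REMOVED;
}

/* Shrink the cache by up to @nr_to_walk objects; returns how many were freed. */
static unsigned long my_cache_shrink(unsigned long nr_to_walk)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct my_obj *obj, *next;

	freed = list_lru_walk(&my_cache_lru, my_obj_isolate, &dispose,
			      nr_to_walk);
	list_for_each_entry_safe(obj, next, &dispose, lru)
		kfree(obj);
	return freed;
}
#endif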