mm/list_lru.c 14.6 KB
  // SPDX-License-Identifier: GPL-2.0-only
  /*
   * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
   * Authors: David Chinner and Glauber Costa
   *
   * Generic LRU infrastructure
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/list_lru.h>
  #include <linux/slab.h>
  #include <linux/mutex.h>
  #include <linux/memcontrol.h>
  #include "slab.h"

  #ifdef CONFIG_MEMCG_KMEM
  static LIST_HEAD(list_lrus);
  static DEFINE_MUTEX(list_lrus_mutex);
  
  static void list_lru_register(struct list_lru *lru)
  {
  	mutex_lock(&list_lrus_mutex);
  	list_add(&lru->list, &list_lrus);
  	mutex_unlock(&list_lrus_mutex);
  }
  
  static void list_lru_unregister(struct list_lru *lru)
  {
  	mutex_lock(&list_lrus_mutex);
  	list_del(&lru->list);
  	mutex_unlock(&list_lrus_mutex);
  }

  static int lru_shrinker_id(struct list_lru *lru)
  {
  	return lru->shrinker_id;
  }
  static inline bool list_lru_memcg_aware(struct list_lru *lru)
  {
  	return lru->memcg_aware;
  }
  
  static inline struct list_lru_one *
  list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
  {
  	struct list_lru_memcg *memcg_lrus;
  	/*
  	 * Either lock or RCU protects the array of per cgroup lists
  	 * from relocation (see memcg_update_list_lru_node).
  	 */
  	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
  					   lockdep_is_held(&nlru->lock));
  	if (memcg_lrus && idx >= 0)
  		return memcg_lrus->lru[idx];
  	return &nlru->lru;
  }
  static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
  {
  	struct page *page;
  
  	if (!memcg_kmem_enabled())
  		return NULL;
  	page = virt_to_head_page(ptr);
  	return memcg_from_slab_page(page);
  }
  static inline struct list_lru_one *
  list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
  		   struct mem_cgroup **memcg_ptr)
  {
  	struct list_lru_one *l = &nlru->lru;
  	struct mem_cgroup *memcg = NULL;
  
  	if (!nlru->memcg_lrus)
  		goto out;
  
  	memcg = mem_cgroup_from_kmem(ptr);
  	if (!memcg)
  		goto out;

  	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
  out:
  	if (memcg_ptr)
  		*memcg_ptr = memcg;
  	return l;
  }
  #else
  static void list_lru_register(struct list_lru *lru)
  {
  }
  
  static void list_lru_unregister(struct list_lru *lru)
  {
  }
  static int lru_shrinker_id(struct list_lru *lru)
  {
  	return -1;
  }
  static inline bool list_lru_memcg_aware(struct list_lru *lru)
  {
  	return false;
  }
  
  static inline struct list_lru_one *
  list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
  {
  	return &nlru->lru;
  }
  
  static inline struct list_lru_one *
  list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
  		   struct mem_cgroup **memcg_ptr)
  {
  	if (memcg_ptr)
  		*memcg_ptr = NULL;
  	return &nlru->lru;
  }
  #endif /* CONFIG_MEMCG_KMEM */

  bool list_lru_add(struct list_lru *lru, struct list_head *item)
  {
  	int nid = page_to_nid(virt_to_page(item));
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct mem_cgroup *memcg;
  	struct list_lru_one *l;
  
  	spin_lock(&nlru->lock);
  	if (list_empty(item)) {
  		l = list_lru_from_kmem(nlru, item, &memcg);
  		list_add_tail(item, &l->list);
  		/* Set shrinker bit if the first element was added */
  		if (!l->nr_items++)
  			memcg_set_shrinker_bit(memcg, nid,
  					       lru_shrinker_id(lru));
  		nlru->nr_items++;
  		spin_unlock(&nlru->lock);
  		return true;
  	}
  	spin_unlock(&nlru->lock);
  	return false;
  }
  EXPORT_SYMBOL_GPL(list_lru_add);
  
  bool list_lru_del(struct list_lru *lru, struct list_head *item)
  {
  	int nid = page_to_nid(virt_to_page(item));
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct list_lru_one *l;
  
  	spin_lock(&nlru->lock);
  	if (!list_empty(item)) {
  		l = list_lru_from_kmem(nlru, item, NULL);
  		list_del_init(item);
  		l->nr_items--;
  		nlru->nr_items--;
  		spin_unlock(&nlru->lock);
  		return true;
  	}
  	spin_unlock(&nlru->lock);
  	return false;
  }
  EXPORT_SYMBOL_GPL(list_lru_del);
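  
  /*
   * Illustrative usage sketch (not part of this file): a typical caller embeds
   * a list_head in its cache object and moves it on and off the list_lru as
   * the object becomes unused or referenced again.  The example_object type
   * and helper names below are hypothetical.  Both calls are idempotent
   * thanks to the list_empty()/!list_empty() checks above.
   */
  struct example_object {
  	struct list_head lru;	/* must start out initialised and empty */
  };
  
  static void example_object_unused(struct list_lru *lru, struct example_object *obj)
  {
  	list_lru_add(lru, &obj->lru);	/* returns true if it was newly added */
  }
  
  static void example_object_referenced(struct list_lru *lru, struct example_object *obj)
  {
  	list_lru_del(lru, &obj->lru);	/* returns true if it was on the list */
  }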
  void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
  {
  	list_del_init(item);
  	list->nr_items--;
  }
  EXPORT_SYMBOL_GPL(list_lru_isolate);
  
  void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
  			   struct list_head *head)
  {
  	list_move(item, head);
  	list->nr_items--;
  }
  EXPORT_SYMBOL_GPL(list_lru_isolate_move);
  unsigned long list_lru_count_one(struct list_lru *lru,
  				 int nid, struct mem_cgroup *memcg)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct list_lru_one *l;
  	unsigned long count;

  	rcu_read_lock();
  	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
  	count = l->nr_items;
  	rcu_read_unlock();
  
  	return count;
  }
  EXPORT_SYMBOL_GPL(list_lru_count_one);
  
  unsigned long list_lru_count_node(struct list_lru *lru, int nid)
  {
  	struct list_lru_node *nlru;

  	nlru = &lru->node[nid];
  	return nlru->nr_items;
  }
  EXPORT_SYMBOL_GPL(list_lru_count_node);

  static unsigned long
  __list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
  		    list_lru_walk_cb isolate, void *cb_arg,
  		    unsigned long *nr_to_walk)
  {
  	struct list_lru_one *l;
  	struct list_head *item, *n;
  	unsigned long isolated = 0;

  	l = list_lru_from_memcg_idx(nlru, memcg_idx);
  restart:
  	list_for_each_safe(item, n, &l->list) {
  		enum lru_status ret;
  
  		/*
  		 * decrement nr_to_walk first so that we don't livelock if we
  		 * get stuck on large numbers of LRU_RETRY items
  		 */
  		if (!*nr_to_walk)
  			break;
  		--*nr_to_walk;

  		ret = isolate(item, l, &nlru->lock, cb_arg);
  		switch (ret) {
  		case LRU_REMOVED_RETRY:
  			assert_spin_locked(&nlru->lock);
  			/* fall through */
  		case LRU_REMOVED:
  			isolated++;
  			nlru->nr_items--;
  			/*
  			 * If the lru lock has been dropped, our list
  			 * traversal is now invalid and so we have to
  			 * restart from scratch.
  			 */
  			if (ret == LRU_REMOVED_RETRY)
  				goto restart;
  			break;
  		case LRU_ROTATE:
  			list_move_tail(item, &l->list);
  			break;
  		case LRU_SKIP:
  			break;
  		case LRU_RETRY:
  			/*
  			 * The lru lock has been dropped, our list traversal is
  			 * now invalid and so we have to restart from scratch.
  			 */
  			assert_spin_locked(&nlru->lock);
  			goto restart;
  		default:
  			BUG();
  		}
  	}
  	return isolated;
  }
  
  unsigned long
  list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
  		  list_lru_walk_cb isolate, void *cb_arg,
  		  unsigned long *nr_to_walk)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	unsigned long ret;
  
  	spin_lock(&nlru->lock);
  	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
  				  nr_to_walk);
  	spin_unlock(&nlru->lock);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_one);
  unsigned long
  list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
  		      list_lru_walk_cb isolate, void *cb_arg,
  		      unsigned long *nr_to_walk)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	unsigned long ret;
  
  	spin_lock_irq(&nlru->lock);
  	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
  				  nr_to_walk);
  	spin_unlock_irq(&nlru->lock);
  	return ret;
  }
  unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
  				 list_lru_walk_cb isolate, void *cb_arg,
  				 unsigned long *nr_to_walk)
  {
  	long isolated = 0;
  	int memcg_idx;
  	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
  				      nr_to_walk);
  	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
  		for_each_memcg_cache_index(memcg_idx) {
  			struct list_lru_node *nlru = &lru->node[nid];
  
  			spin_lock(&nlru->lock);
  			isolated += __list_lru_walk_one(nlru, memcg_idx,
  							isolate, cb_arg,
  							nr_to_walk);
  			spin_unlock(&nlru->lock);
  			if (*nr_to_walk <= 0)
  				break;
  		}
  	}
  	return isolated;
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_node);
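  
  /*
   * Illustrative isolate callback sketch (not part of this file), matching the
   * list_lru_walk_cb signature used by the walkers above.  It is invoked with
   * the per-node lock held; a real callback would normally check object state
   * first and may return LRU_SKIP, LRU_ROTATE, or LRU_RETRY, which
   * __list_lru_walk_one() handles above.  The callback name and the
   * dispose-list convention passed through cb_arg are hypothetical.
   */
  static enum lru_status example_isolate(struct list_head *item,
  				       struct list_lru_one *list,
  				       spinlock_t *lock, void *cb_arg)
  {
  	struct list_head *dispose = cb_arg;	/* caller-supplied free list */
  
  	/* unlink under the lock and queue the object for later freeing */
  	list_lru_isolate_move(list, item, dispose);
  	return LRU_REMOVED;
  }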
  static void init_one_lru(struct list_lru_one *l)
  {
  	INIT_LIST_HEAD(&l->list);
  	l->nr_items = 0;
  }
  #ifdef CONFIG_MEMCG_KMEM
  static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
  					  int begin, int end)
  {
  	int i;
  
  	for (i = begin; i < end; i++)
  		kfree(memcg_lrus->lru[i]);
  }
  
  static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
  				      int begin, int end)
  {
  	int i;
  
  	for (i = begin; i < end; i++) {
  		struct list_lru_one *l;
  
  		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
  		if (!l)
  			goto fail;
  
  		init_one_lru(l);
  		memcg_lrus->lru[i] = l;
  	}
  	return 0;
  fail:
  	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
  	return -ENOMEM;
  }
  
  static int memcg_init_list_lru_node(struct list_lru_node *nlru)
  {
  	struct list_lru_memcg *memcg_lrus;
  	int size = memcg_nr_cache_ids;
  	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
  			      size * sizeof(void *), GFP_KERNEL);
  	if (!memcg_lrus)
  		return -ENOMEM;
  	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
  		kvfree(memcg_lrus);
  		return -ENOMEM;
  	}
  	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);
  
  	return 0;
  }
  
  static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
  {
  	struct list_lru_memcg *memcg_lrus;
  	/*
  	 * This is called when shrinker has already been unregistered,
  	 * and nobody can use it. So, there is no need to use kvfree_rcu().
  	 */
  	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
  	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
  	kvfree(memcg_lrus);
  }
  
  static void kvfree_rcu(struct rcu_head *head)
  {
  	struct list_lru_memcg *mlru;
  
  	mlru = container_of(head, struct list_lru_memcg, rcu);
  	kvfree(mlru);
  }
  
  static int memcg_update_list_lru_node(struct list_lru_node *nlru,
  				      int old_size, int new_size)
  {
  	struct list_lru_memcg *old, *new;
  
  	BUG_ON(old_size > new_size);
  	old = rcu_dereference_protected(nlru->memcg_lrus,
  					lockdep_is_held(&list_lrus_mutex));
  	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
  	if (!new)
  		return -ENOMEM;
  
  	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
  		kvfree(new);
  		return -ENOMEM;
  	}
  	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));
  
  	/*
  	 * The locking below allows readers that hold nlru->lock avoid taking
  	 * rcu_read_lock (see list_lru_from_memcg_idx).
  	 *
  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
  	 * we have to use IRQ-safe primitives here to avoid deadlock.
  	 */
  	spin_lock_irq(&nlru->lock);
  	rcu_assign_pointer(nlru->memcg_lrus, new);
  	spin_unlock_irq(&nlru->lock);
  	call_rcu(&old->rcu, kvfree_rcu);
  	return 0;
  }
  
  static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
  					      int old_size, int new_size)
  {
  	struct list_lru_memcg *memcg_lrus;
  
  	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
  					       lockdep_is_held(&list_lrus_mutex));
  	/* do not bother shrinking the array back to the old size, because we
  	 * cannot handle allocation failures here */
  	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
  }
  
  static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
  {
  	int i;
  	lru->memcg_aware = memcg_aware;
  	if (!memcg_aware)
  		return 0;
  
  	for_each_node(i) {
  		if (memcg_init_list_lru_node(&lru->node[i]))
  			goto fail;
  	}
  	return 0;
  fail:
  	for (i = i - 1; i >= 0; i--) {
  		if (!lru->node[i].memcg_lrus)
  			continue;
  		memcg_destroy_list_lru_node(&lru->node[i]);
  	}
  	return -ENOMEM;
  }
  
  static void memcg_destroy_list_lru(struct list_lru *lru)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_destroy_list_lru_node(&lru->node[i]);
  }
  
  static int memcg_update_list_lru(struct list_lru *lru,
  				 int old_size, int new_size)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return 0;
  	for_each_node(i) {
  		if (memcg_update_list_lru_node(&lru->node[i],
  					       old_size, new_size))
  			goto fail;
  	}
  	return 0;
  fail:
  	for (i = i - 1; i >= 0; i--) {
  		if (!lru->node[i].memcg_lrus)
  			continue;
  		memcg_cancel_update_list_lru_node(&lru->node[i],
  						  old_size, new_size);
  	}
  	return -ENOMEM;
  }
  
  static void memcg_cancel_update_list_lru(struct list_lru *lru,
  					 int old_size, int new_size)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_cancel_update_list_lru_node(&lru->node[i],
  						  old_size, new_size);
  }
  
  int memcg_update_all_list_lrus(int new_size)
  {
  	int ret = 0;
  	struct list_lru *lru;
  	int old_size = memcg_nr_cache_ids;
  
  	mutex_lock(&list_lrus_mutex);
  	list_for_each_entry(lru, &list_lrus, list) {
  		ret = memcg_update_list_lru(lru, old_size, new_size);
  		if (ret)
  			goto fail;
  	}
  out:
  	mutex_unlock(&list_lrus_mutex);
  	return ret;
  fail:
  	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
  		memcg_cancel_update_list_lru(lru, old_size, new_size);
  	goto out;
  }

  static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
  				      int src_idx, struct mem_cgroup *dst_memcg)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	int dst_idx = dst_memcg->kmemcg_id;
  	struct list_lru_one *src, *dst;
  	bool set;
  
  	/*
  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
  	 * we have to use IRQ-safe primitives here to avoid deadlock.
  	 */
  	spin_lock_irq(&nlru->lock);
  
  	src = list_lru_from_memcg_idx(nlru, src_idx);
  	dst = list_lru_from_memcg_idx(nlru, dst_idx);
  
  	list_splice_init(&src->list, &dst->list);
  	set = (!dst->nr_items && src->nr_items);
  	dst->nr_items += src->nr_items;
  	if (set)
  		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
  	src->nr_items = 0;
  
  	spin_unlock_irq(&nlru->lock);
  }
  
  static void memcg_drain_list_lru(struct list_lru *lru,
  				 int src_idx, struct mem_cgroup *dst_memcg)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
  }
  void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
  {
  	struct list_lru *lru;
  
  	mutex_lock(&list_lrus_mutex);
  	list_for_each_entry(lru, &list_lrus, list)
  		memcg_drain_list_lru(lru, src_idx, dst_memcg);
  	mutex_unlock(&list_lrus_mutex);
  }
  #else
  static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
  {
  	return 0;
  }
  
  static void memcg_destroy_list_lru(struct list_lru *lru)
  {
  }
  #endif /* CONFIG_MEMCG_KMEM */
  
  int __list_lru_init(struct list_lru *lru, bool memcg_aware,
  		    struct lock_class_key *key, struct shrinker *shrinker)
  {
  	int i;
  	int err = -ENOMEM;
  #ifdef CONFIG_MEMCG_KMEM
  	if (shrinker)
  		lru->shrinker_id = shrinker->id;
  	else
  		lru->shrinker_id = -1;
  #endif
  	memcg_get_cache_ids();

  	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
  	if (!lru->node)
  		goto out;

  	for_each_node(i) {
  		spin_lock_init(&lru->node[i].lock);
  		if (key)
  			lockdep_set_class(&lru->node[i].lock, key);
  		init_one_lru(&lru->node[i].lru);
  	}
  
  	err = memcg_init_list_lru(lru, memcg_aware);
  	if (err) {
  		kfree(lru->node);
  		/* Do this so a list_lru_destroy() doesn't crash: */
  		lru->node = NULL;
  		goto out;
  	}

  	list_lru_register(lru);
  out:
  	memcg_put_cache_ids();
  	return err;
  }
  EXPORT_SYMBOL_GPL(__list_lru_init);
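  
  /*
   * Illustrative lifecycle sketch (not part of this file): callers normally
   * reach __list_lru_init() through the list_lru_init*() wrappers declared in
   * <linux/list_lru.h>, and tear the structure down with list_lru_destroy()
   * below.  example_lru, example_shrinker and the helper names are
   * hypothetical.
   */
  static struct list_lru example_lru;
  
  static int example_cache_init(struct shrinker *example_shrinker)
  {
  	/* memcg-aware variant; plain list_lru_init() skips the per-memcg arrays */
  	return list_lru_init_memcg(&example_lru, example_shrinker);
  }
  
  static void example_cache_exit(void)
  {
  	list_lru_destroy(&example_lru);
  }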
  
  void list_lru_destroy(struct list_lru *lru)
  {
  	/* Already destroyed or not yet initialized? */
  	if (!lru->node)
  		return;
  
  	memcg_get_cache_ids();
  	list_lru_unregister(lru);
  
  	memcg_destroy_list_lru(lru);
  	kfree(lru->node);
  	lru->node = NULL;

  #ifdef CONFIG_MEMCG_KMEM
  	lru->shrinker_id = -1;
  #endif
  	memcg_put_cache_ids();
  }
  EXPORT_SYMBOL_GPL(list_lru_destroy);