mm/list_lru.c

  // SPDX-License-Identifier: GPL-2.0-only
  /*
   * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
   * Authors: David Chinner and Glauber Costa
   *
   * Generic LRU infrastructure
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"

  #ifdef CONFIG_MEMCG_KMEM
  static LIST_HEAD(list_lrus);
  static DEFINE_MUTEX(list_lrus_mutex);
  
  static void list_lru_register(struct list_lru *lru)
  {
  	mutex_lock(&list_lrus_mutex);
  	list_add(&lru->list, &list_lrus);
  	mutex_unlock(&list_lrus_mutex);
  }
  
  static void list_lru_unregister(struct list_lru *lru)
  {
  	mutex_lock(&list_lrus_mutex);
  	list_del(&lru->list);
  	mutex_unlock(&list_lrus_mutex);
  }
  static int lru_shrinker_id(struct list_lru *lru)
  {
  	return lru->shrinker_id;
  }
  static inline bool list_lru_memcg_aware(struct list_lru *lru)
  {
  	return lru->memcg_aware;
  }
  
  static inline struct list_lru_one *
  list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
  {
  	struct list_lru_memcg *memcg_lrus;
  	/*
  	 * Either lock or RCU protects the array of per cgroup lists
  	 * from relocation (see memcg_update_list_lru_node).
  	 */
  	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
  					   lockdep_is_held(&nlru->lock));
  	if (memcg_lrus && idx >= 0)
  		return memcg_lrus->lru[idx];
  	return &nlru->lru;
  }
  
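/*
 * Map a kmem object to the list it should live on: look up the object's
 * memory cgroup and pick the matching per-memcg list, falling back to the
 * node-level list when there is none.
 */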
  static inline struct list_lru_one *
  list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
  		   struct mem_cgroup **memcg_ptr)
  {
  	struct list_lru_one *l = &nlru->lru;
  	struct mem_cgroup *memcg = NULL;
  
  	if (!nlru->memcg_lrus)
  		goto out;
  	memcg = mem_cgroup_from_obj(ptr);
  	if (!memcg)
  		goto out;
  	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
  out:
  	if (memcg_ptr)
  		*memcg_ptr = memcg;
  	return l;
  }
  #else
  static void list_lru_register(struct list_lru *lru)
  {
  }
  
  static void list_lru_unregister(struct list_lru *lru)
  {
  }
  static int lru_shrinker_id(struct list_lru *lru)
  {
  	return -1;
  }
  static inline bool list_lru_memcg_aware(struct list_lru *lru)
  {
  	return false;
  }
  
  static inline struct list_lru_one *
  list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
  {
  	return &nlru->lru;
  }
  
  static inline struct list_lru_one *
  list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
  		   struct mem_cgroup **memcg_ptr)
  {
  	if (memcg_ptr)
  		*memcg_ptr = NULL;
  	return &nlru->lru;
  }
  #endif /* CONFIG_MEMCG_KMEM */
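
/*
 * Add an item to the LRU of the node (and memcg, when enabled) it belongs
 * to.  Returns true if the item was not on a list and has been added;
 * false if it was already present.  The node lock is taken here, so the
 * caller must not hold it.
 */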
  bool list_lru_add(struct list_lru *lru, struct list_head *item)
  {
  	int nid = page_to_nid(virt_to_page(item));
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct mem_cgroup *memcg;
  	struct list_lru_one *l;
  
  	spin_lock(&nlru->lock);
  	if (list_empty(item)) {
  		l = list_lru_from_kmem(nlru, item, &memcg);
  		list_add_tail(item, &l->list);
  		/* Set shrinker bit if the first element was added */
  		if (!l->nr_items++)
  			memcg_set_shrinker_bit(memcg, nid,
  					       lru_shrinker_id(lru));
  		nlru->nr_items++;
  		spin_unlock(&nlru->lock);
  		return true;
  	}
  	spin_unlock(&nlru->lock);
  	return false;
  }
  EXPORT_SYMBOL_GPL(list_lru_add);
  
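/*
 * Remove an item from the list it is currently on.  Returns true if the
 * item was found and removed, false if it was not on a list.
 */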
  bool list_lru_del(struct list_lru *lru, struct list_head *item)
  {
  	int nid = page_to_nid(virt_to_page(item));
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct list_lru_one *l;
  
  	spin_lock(&nlru->lock);
  	if (!list_empty(item)) {
  		l = list_lru_from_kmem(nlru, item, NULL);
  		list_del_init(item);
  		l->nr_items--;
  		nlru->nr_items--;
  		spin_unlock(&nlru->lock);
  		return true;
  	}
  	spin_unlock(&nlru->lock);
  	return false;
  }
  EXPORT_SYMBOL_GPL(list_lru_del);
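
/*
 * Helpers for isolate callbacks, which run with the node lock held: drop
 * the item from its list (or move it to a caller-supplied head) and keep
 * the per-list item count consistent.
 */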
  void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
  {
  	list_del_init(item);
  	list->nr_items--;
  }
  EXPORT_SYMBOL_GPL(list_lru_isolate);
  
  void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
  			   struct list_head *head)
  {
  	list_move(item, head);
  	list->nr_items--;
  }
  EXPORT_SYMBOL_GPL(list_lru_isolate_move);
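
/*
 * Return the number of items on the list of the given node and memcg.
 * The count is read under RCU without the node lock, so it is a snapshot.
 */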
  unsigned long list_lru_count_one(struct list_lru *lru,
  				 int nid, struct mem_cgroup *memcg)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	struct list_lru_one *l;
  	unsigned long count;
  	rcu_read_lock();
  	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
  	count = READ_ONCE(l->nr_items);
  	rcu_read_unlock();
  
  	return count;
  }
  EXPORT_SYMBOL_GPL(list_lru_count_one);
  
  unsigned long list_lru_count_node(struct list_lru *lru, int nid)
  {
  	struct list_lru_node *nlru;
  	nlru = &lru->node[nid];
  	return nlru->nr_items;
  }
  EXPORT_SYMBOL_GPL(list_lru_count_node);
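
/*
 * Core walk loop, called with nlru->lock held: run the isolate callback on
 * each item of one list and act on the returned lru_status.  The callback
 * may drop the lock (LRU_REMOVED_RETRY, LRU_RETRY), in which case the
 * traversal restarts from the head of the list.
 */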
  static unsigned long
  __list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
  		    list_lru_walk_cb isolate, void *cb_arg,
  		    unsigned long *nr_to_walk)
  {
  	struct list_lru_one *l;
  	struct list_head *item, *n;
  	unsigned long isolated = 0;
  	l = list_lru_from_memcg_idx(nlru, memcg_idx);
  restart:
  	list_for_each_safe(item, n, &l->list) {
  		enum lru_status ret;
  
  		/*
  		 * decrement nr_to_walk first so that we don't livelock if we
  		 * get stuck on large numbers of LRU_RETRY items
  		 */
  		if (!*nr_to_walk)
  			break;
  		--*nr_to_walk;
  		ret = isolate(item, l, &nlru->lock, cb_arg);
  		switch (ret) {
  		case LRU_REMOVED_RETRY:
  			assert_spin_locked(&nlru->lock);
  			fallthrough;
  		case LRU_REMOVED:
  			isolated++;
  			nlru->nr_items--;
  			/*
  			 * If the lru lock has been dropped, our list
  			 * traversal is now invalid and so we have to
  			 * restart from scratch.
  			 */
  			if (ret == LRU_REMOVED_RETRY)
  				goto restart;
  			break;
  		case LRU_ROTATE:
  			list_move_tail(item, &l->list);
  			break;
  		case LRU_SKIP:
  			break;
  		case LRU_RETRY:
  			/*
  			 * The lru lock has been dropped, our list traversal is
  			 * now invalid and so we have to restart from scratch.
  			 */
  			assert_spin_locked(&nlru->lock);
  			goto restart;
  		default:
  			BUG();
  		}
  	}
  	return isolated;
  }
  
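/*
 * Walk the list of one node/memcg pair under the node lock, walking at
 * most *nr_to_walk items.  Returns the number of items the callback
 * isolated.
 */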
  unsigned long
  list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
  		  list_lru_walk_cb isolate, void *cb_arg,
  		  unsigned long *nr_to_walk)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	unsigned long ret;
  
  	spin_lock(&nlru->lock);
  	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
  				  nr_to_walk);
  	spin_unlock(&nlru->lock);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_one);
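
/*
 * Same as list_lru_walk_one(), except that the node lock is acquired with
 * spin_lock_irq().
 */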
  unsigned long
  list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
  		      list_lru_walk_cb isolate, void *cb_arg,
  		      unsigned long *nr_to_walk)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	unsigned long ret;
  
  	spin_lock_irq(&nlru->lock);
  	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
  				  nr_to_walk);
  	spin_unlock_irq(&nlru->lock);
  	return ret;
  }
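
/*
 * Walk every list on one node: the system/root list first, then each
 * per-memcg list if the lru is memcg aware, until the *nr_to_walk budget
 * is used up.
 */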
  unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
  				 list_lru_walk_cb isolate, void *cb_arg,
  				 unsigned long *nr_to_walk)
  {
  	long isolated = 0;
  	int memcg_idx;
  	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
  				      nr_to_walk);
  	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
  		for_each_memcg_cache_index(memcg_idx) {
  			struct list_lru_node *nlru = &lru->node[nid];
  
  			spin_lock(&nlru->lock);
  			isolated += __list_lru_walk_one(nlru, memcg_idx,
  							isolate, cb_arg,
  							nr_to_walk);
  			spin_unlock(&nlru->lock);
  			if (*nr_to_walk <= 0)
  				break;
  		}
  	}
  	return isolated;
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_node);
  static void init_one_lru(struct list_lru_one *l)
  {
  	INIT_LIST_HEAD(&l->list);
  	l->nr_items = 0;
  }
  #ifdef CONFIG_MEMCG_KMEM
  static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
  					  int begin, int end)
  {
  	int i;
  
  	for (i = begin; i < end; i++)
  		kfree(memcg_lrus->lru[i]);
  }
  
  static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
  				      int begin, int end)
  {
  	int i;
  
  	for (i = begin; i < end; i++) {
  		struct list_lru_one *l;
  
  		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
  		if (!l)
  			goto fail;
  
  		init_one_lru(l);
  		memcg_lrus->lru[i] = l;
  	}
  	return 0;
  fail:
  	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
  	return -ENOMEM;
  }
  
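/*
 * Allocate one node's per-memcg array, sized for the current
 * memcg_nr_cache_ids, and fill it with freshly initialised lists.
 */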
  static int memcg_init_list_lru_node(struct list_lru_node *nlru)
  {
  	struct list_lru_memcg *memcg_lrus;
  	int size = memcg_nr_cache_ids;
  	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
  			      size * sizeof(void *), GFP_KERNEL);
  	if (!memcg_lrus)
  		return -ENOMEM;
  	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
  		kvfree(memcg_lrus);
  		return -ENOMEM;
  	}
  	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);
  
  	return 0;
  }
  
  static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
  {
  	struct list_lru_memcg *memcg_lrus;
  	/*
  	 * This is called when shrinker has already been unregistered,
  	 * and nobody can use it. So, there is no need to use kvfree_rcu_local().
  	 */
  	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
  	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
  	kvfree(memcg_lrus);
  }
  static void kvfree_rcu_local(struct rcu_head *head)
  {
  	struct list_lru_memcg *mlru;
  
  	mlru = container_of(head, struct list_lru_memcg, rcu);
  	kvfree(mlru);
  }
  
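/*
 * Grow one node's per-memcg array from old_size to new_size entries: a new
 * array is allocated, existing entries are copied over, the pointer is
 * switched under nlru->lock, and the old array is freed via call_rcu() so
 * lockless readers always see a valid array.
 */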
  static int memcg_update_list_lru_node(struct list_lru_node *nlru,
  				      int old_size, int new_size)
  {
  	struct list_lru_memcg *old, *new;
  
  	BUG_ON(old_size > new_size);
  	old = rcu_dereference_protected(nlru->memcg_lrus,
  					lockdep_is_held(&list_lrus_mutex));
  	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
  	if (!new)
  		return -ENOMEM;
  
  	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
  		kvfree(new);
  		return -ENOMEM;
  	}
  	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));
  
  	/*
	 * The locking below allows readers that hold nlru->lock to avoid taking
	 * rcu_read_lock (see list_lru_from_memcg_idx).
  	 *
  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
  	 * we have to use IRQ-safe primitives here to avoid deadlock.
  	 */
  	spin_lock_irq(&nlru->lock);
  	rcu_assign_pointer(nlru->memcg_lrus, new);
  	spin_unlock_irq(&nlru->lock);
  	call_rcu(&old->rcu, kvfree_rcu_local);
  	return 0;
  }
  
  static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
  					      int old_size, int new_size)
  {
  	struct list_lru_memcg *memcg_lrus;
  
  	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
  					       lockdep_is_held(&list_lrus_mutex));
  	/* do not bother shrinking the array back to the old size, because we
  	 * cannot handle allocation failures here */
  	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
  }
  
  static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
  {
  	int i;
  	lru->memcg_aware = memcg_aware;
  	if (!memcg_aware)
  		return 0;
  
  	for_each_node(i) {
  		if (memcg_init_list_lru_node(&lru->node[i]))
  			goto fail;
  	}
  	return 0;
  fail:
  	for (i = i - 1; i >= 0; i--) {
  		if (!lru->node[i].memcg_lrus)
  			continue;
  		memcg_destroy_list_lru_node(&lru->node[i]);
  	}
  	return -ENOMEM;
  }
  
  static void memcg_destroy_list_lru(struct list_lru *lru)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_destroy_list_lru_node(&lru->node[i]);
  }
  
  static int memcg_update_list_lru(struct list_lru *lru,
  				 int old_size, int new_size)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return 0;
  	for_each_node(i) {
  		if (memcg_update_list_lru_node(&lru->node[i],
  					       old_size, new_size))
  			goto fail;
  	}
  	return 0;
  fail:
  	for (i = i - 1; i >= 0; i--) {
  		if (!lru->node[i].memcg_lrus)
  			continue;
  		memcg_cancel_update_list_lru_node(&lru->node[i],
  						  old_size, new_size);
  	}
  	return -ENOMEM;
  }
  
  static void memcg_cancel_update_list_lru(struct list_lru *lru,
  					 int old_size, int new_size)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_cancel_update_list_lru_node(&lru->node[i],
  						  old_size, new_size);
  }
  
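/*
 * Resize the per-memcg arrays of every registered list_lru to new_size
 * entries, rolling all of them back if any allocation fails.
 */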
  int memcg_update_all_list_lrus(int new_size)
  {
  	int ret = 0;
  	struct list_lru *lru;
  	int old_size = memcg_nr_cache_ids;
  
  	mutex_lock(&list_lrus_mutex);
  	list_for_each_entry(lru, &list_lrus, list) {
  		ret = memcg_update_list_lru(lru, old_size, new_size);
  		if (ret)
  			goto fail;
  	}
  out:
  	mutex_unlock(&list_lrus_mutex);
  	return ret;
  fail:
  	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
  		memcg_cancel_update_list_lru(lru, old_size, new_size);
  	goto out;
  }

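/*
 * Reparenting: splice every item on the src_idx list of this node onto the
 * list of dst_memcg, and set the destination's shrinker bit if anything
 * was moved.
 */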
  static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
  				      int src_idx, struct mem_cgroup *dst_memcg)
  {
  	struct list_lru_node *nlru = &lru->node[nid];
  	int dst_idx = dst_memcg->kmemcg_id;
  	struct list_lru_one *src, *dst;
  
  	/*
  	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
  	 * we have to use IRQ-safe primitives here to avoid deadlock.
  	 */
  	spin_lock_irq(&nlru->lock);
  
  	src = list_lru_from_memcg_idx(nlru, src_idx);
  	dst = list_lru_from_memcg_idx(nlru, dst_idx);
  
  	list_splice_init(&src->list, &dst->list);
  
  	if (src->nr_items) {
  		dst->nr_items += src->nr_items;
  		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
  		src->nr_items = 0;
  	}
  
  	spin_unlock_irq(&nlru->lock);
  }
  
  static void memcg_drain_list_lru(struct list_lru *lru,
  				 int src_idx, struct mem_cgroup *dst_memcg)
  {
  	int i;
  
  	if (!list_lru_memcg_aware(lru))
  		return;
  	for_each_node(i)
  		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
  }
  void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
  {
  	struct list_lru *lru;
  
  	mutex_lock(&list_lrus_mutex);
  	list_for_each_entry(lru, &list_lrus, list)
  		memcg_drain_list_lru(lru, src_idx, dst_memcg);
  	mutex_unlock(&list_lrus_mutex);
  }
  #else
  static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
  {
  	return 0;
  }
  
  static void memcg_destroy_list_lru(struct list_lru *lru)
  {
  }
  #endif /* CONFIG_MEMCG_KMEM */
  
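/*
 * Common initialisation: allocate the per-node array, set up each node's
 * lock and list, create the per-memcg structures when memcg_aware, and
 * register the lru on the global list so id-space updates and draining
 * can find it.
 */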
  int __list_lru_init(struct list_lru *lru, bool memcg_aware,
  		    struct lock_class_key *key, struct shrinker *shrinker)
  {
  	int i;
  	int err = -ENOMEM;
  #ifdef CONFIG_MEMCG_KMEM
  	if (shrinker)
  		lru->shrinker_id = shrinker->id;
  	else
  		lru->shrinker_id = -1;
  #endif
  	memcg_get_cache_ids();
  	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
  	if (!lru->node)
  		goto out;
  	for_each_node(i) {
  		spin_lock_init(&lru->node[i].lock);
  		if (key)
  			lockdep_set_class(&lru->node[i].lock, key);
  		init_one_lru(&lru->node[i].lru);
  	}
  
  	err = memcg_init_list_lru(lru, memcg_aware);
  	if (err) {
  		kfree(lru->node);
  		/* Do this so a list_lru_destroy() doesn't crash: */
  		lru->node = NULL;
  		goto out;
  	}
  	list_lru_register(lru);
  out:
  	memcg_put_cache_ids();
  	return err;
  }
  EXPORT_SYMBOL_GPL(__list_lru_init);
  
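/*
 * Undo __list_lru_init().  Safe to call on an lru that was never
 * initialised or that has already been destroyed (lru->node is NULL in
 * both cases).
 */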
  void list_lru_destroy(struct list_lru *lru)
  {
  	/* Already destroyed or not yet initialized? */
  	if (!lru->node)
  		return;
  
  	memcg_get_cache_ids();
  	list_lru_unregister(lru);
  
  	memcg_destroy_list_lru(lru);
  	kfree(lru->node);
  	lru->node = NULL;
  #ifdef CONFIG_MEMCG_KMEM
  	lru->shrinker_id = -1;
  #endif
  	memcg_put_cache_ids();
  }
  EXPORT_SYMBOL_GPL(list_lru_destroy);