mm/list_lru.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}

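/*
 * Every memcg-aware lru is kept on the global list_lrus list so that
 * memcg_update_all_list_lrus() below can resize the per-cgroup arrays of
 * all of them when the memcg cache-id space grows.
 */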
static int lru_shrinker_id(struct list_lru *lru)
{
        return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        struct list_lru_memcg *memcg_lrus;
        /*
         * Either lock or RCU protects the array of per cgroup lists
         * from relocation (see memcg_update_list_lru_node).
         */
        memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
                                           lockdep_is_held(&nlru->lock));
        if (memcg_lrus && idx >= 0)
                return memcg_lrus->lru[idx];
        return &nlru->lru;
}
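/*
 * Note: a negative idx (e.g. memcg_cache_id() of a NULL cgroup) deliberately
 * falls back to the node-wide list above.
 */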
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
        struct page *page;

        if (!memcg_kmem_enabled())
                return NULL;
        page = virt_to_head_page(ptr);
        return memcg_from_slab_page(page);
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
                   struct mem_cgroup **memcg_ptr)
{
        struct list_lru_one *l = &nlru->lru;
        struct mem_cgroup *memcg = NULL;

        if (!nlru->memcg_lrus)
                goto out;

        memcg = mem_cgroup_from_kmem(ptr);
        if (!memcg)
                goto out;

        l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
        if (memcg_ptr)
                *memcg_ptr = memcg;
        return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
        return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
                   struct mem_cgroup **memcg_ptr)
{
        if (memcg_ptr)
                *memcg_ptr = NULL;
        return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

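/*
 * Illustrative usage (a sketch, not part of this file): a cache embeds a
 * struct list_head in each object and hands it to list_lru_add() when the
 * object becomes unused, e.g.
 *
 *        if (list_lru_add(&my_lru, &obj->lru_node))
 *                ...object is now on the LRU...
 *
 * where my_lru and obj->lru_node are hypothetical names. As the code below
 * shows, the item must be unlinked (list_empty()) for the add to succeed,
 * and the same list_head is passed to list_lru_del() before the object is
 * freed.
 */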
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct mem_cgroup *memcg;
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (list_empty(item)) {
                l = list_lru_from_kmem(nlru, item, &memcg);
                list_add_tail(item, &l->list);
                /* Set shrinker bit if the first element was added */
                if (!l->nr_items++)
                        memcg_set_shrinker_bit(memcg, nid,
                                               lru_shrinker_id(lru));
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                l = list_lru_from_kmem(nlru, item, NULL);
                list_del_init(item);
                l->nr_items--;
                nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
        list_del_init(item);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head)
{
        list_move(item, head);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
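/*
 * Note: the two isolate helpers above are meant to be called from a walk
 * callback, with nlru->lock already held by the walker; they update only the
 * per-list count, while __list_lru_walk_one() below adjusts nlru->nr_items
 * when the callback reports LRU_REMOVED.
 */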
unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        unsigned long count;

        rcu_read_lock();
        l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
        count = l->nr_items;
        rcu_read_unlock();

        return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        struct list_lru_node *nlru;

        nlru = &lru->node[nid];
        return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
{
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;

        l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
        list_for_each_safe(item, n, &l->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, l, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                        /* fall through */
                case LRU_REMOVED:
                        isolated++;
                        nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &l->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }

        return isolated;
}
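/*
 * Illustrative walk callback (a hypothetical sketch, not part of this file):
 * the callback runs with nlru->lock held and reports what it did via
 * enum lru_status, e.g. moving each freeable item to a private dispose list:
 *
 *        static enum lru_status demo_isolate(struct list_head *item,
 *                        struct list_lru_one *lru, spinlock_t *lock,
 *                        void *arg)
 *        {
 *                struct list_head *dispose = arg;
 *
 *                if (demo_object_is_busy(item))
 *                        return LRU_SKIP;
 *                list_lru_isolate_move(lru, item, dispose);
 *                return LRU_REMOVED;
 *        }
 *
 * demo_isolate() and demo_object_is_busy() are made-up names; real callers
 * (e.g. the dcache) follow this pattern.
 */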
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        unsigned long ret;

        spin_lock(&nlru->lock);
        ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
                                  nr_to_walk);
        spin_unlock(&nlru->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                      list_lru_walk_cb isolate, void *cb_arg,
                      unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        unsigned long ret;

        spin_lock_irq(&nlru->lock);
        ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
                                  nr_to_walk);
        spin_unlock_irq(&nlru->lock);
        return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
{
        long isolated = 0;
        int memcg_idx;

        isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
                                      nr_to_walk);
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx) {
                        struct list_lru_node *nlru = &lru->node[nid];

                        spin_lock(&nlru->lock);
                        isolated += __list_lru_walk_one(nlru, memcg_idx,
                                                        isolate, cb_arg,
                                                        nr_to_walk);
                        spin_unlock(&nlru->lock);

                        if (*nr_to_walk <= 0)
                                break;
                }
        }
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
        INIT_LIST_HEAD(&l->list);
        l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                          int begin, int end)
{
        int i;

        for (i = begin; i < end; i++)
                kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                      int begin, int end)
{
        int i;

        for (i = begin; i < end; i++) {
                struct list_lru_one *l;

                l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
                if (!l)
                        goto fail;

                init_one_lru(l);
                memcg_lrus->lru[i] = l;
        }
        return 0;
fail:
        __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
        return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
        struct list_lru_memcg *memcg_lrus;
        int size = memcg_nr_cache_ids;

        memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
                              size * sizeof(void *), GFP_KERNEL);
        if (!memcg_lrus)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
                kvfree(memcg_lrus);
                return -ENOMEM;
        }
        RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

        return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
        struct list_lru_memcg *memcg_lrus;
        /*
         * This is called when the shrinker has already been unregistered,
         * and nobody can use it. So, there is no need to use kvfree_rcu().
         */
        memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
        __memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
        kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
        struct list_lru_memcg *mlru;

        mlru = container_of(head, struct list_lru_memcg, rcu);
        kvfree(mlru);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
                                      int old_size, int new_size)
{
        struct list_lru_memcg *old, *new;

        BUG_ON(old_size > new_size);

        old = rcu_dereference_protected(nlru->memcg_lrus,
                                        lockdep_is_held(&list_lrus_mutex));
        new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(new, old_size, new_size)) {
                kvfree(new);
                return -ENOMEM;
        }

        memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

        /*
         * The locking below allows readers that hold nlru->lock to avoid
         * taking rcu_read_lock (see list_lru_from_memcg_idx).
         *
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);
        rcu_assign_pointer(nlru->memcg_lrus, new);
        spin_unlock_irq(&nlru->lock);

        call_rcu(&old->rcu, kvfree_rcu);
        return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
                                              int old_size, int new_size)
{
        struct list_lru_memcg *memcg_lrus;

        memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
                                        lockdep_is_held(&list_lrus_mutex));
        /* do not bother shrinking the array back to the old size, because we
         * cannot handle allocation failures here */
        __memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        int i;

        lru->memcg_aware = memcg_aware;

        if (!memcg_aware)
                return 0;

        for_each_node(i) {
                if (memcg_init_list_lru_node(&lru->node[i]))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;
                memcg_destroy_list_lru_node(&lru->node[i]);
        }
        return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
                                 int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return 0;

        for_each_node(i) {
                if (memcg_update_list_lru_node(&lru->node[i],
                                               old_size, new_size))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;

                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
        }
        return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
                                         int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
        int ret = 0;
        struct list_lru *lru;
        int old_size = memcg_nr_cache_ids;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list) {
                ret = memcg_update_list_lru(lru, old_size, new_size);
                if (ret)
                        goto fail;
        }
out:
        mutex_unlock(&list_lrus_mutex);
        return ret;
fail:
        list_for_each_entry_continue_reverse(lru, &list_lrus, list)
                memcg_cancel_update_list_lru(lru, old_size, new_size);
        goto out;
}
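/*
 * Note on the update protocol above: the resize is two-phase. Every lru on
 * list_lrus is grown to new_size; if any allocation fails, the lrus already
 * updated are walked in reverse and the newly added slots are freed again by
 * memcg_cancel_update_list_lru(), so the update is all-or-nothing.
 */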

static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
                                      int src_idx, struct mem_cgroup *dst_memcg)
{
        struct list_lru_node *nlru = &lru->node[nid];
        int dst_idx = dst_memcg->kmemcg_id;
        struct list_lru_one *src, *dst;
        bool set;

        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);

        src = list_lru_from_memcg_idx(nlru, src_idx);
        dst = list_lru_from_memcg_idx(nlru, dst_idx);

        list_splice_init(&src->list, &dst->list);
        set = (!dst->nr_items && src->nr_items);
        dst->nr_items += src->nr_items;
        if (set)
                memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
        src->nr_items = 0;

        spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
                                 int src_idx, struct mem_cgroup *dst_memcg)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}
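/*
 * memcg_drain_all_list_lrus() below reparents list_lru items: anything still
 * sitting on the src_idx per-cgroup lists is spliced onto dst_memcg's lists
 * (typically when a cgroup is taken offline), so the objects stay reachable
 * by the shrinkers.
 */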
void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
        struct list_lru *lru;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list)
                memcg_drain_list_lru(lru, src_idx, dst_memcg);
        mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key, struct shrinker *shrinker)
{
        int i;
        int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
        if (shrinker)
                lru->shrinker_id = shrinker->id;
        else
                lru->shrinker_id = -1;
#endif
        memcg_get_cache_ids();

        lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
        if (!lru->node)
                goto out;

        for_each_node(i) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                init_one_lru(&lru->node[i].lru);
        }

        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
                /* Do this so a list_lru_destroy() doesn't crash: */
                lru->node = NULL;
                goto out;
        }

        list_lru_register(lru);
out:
        memcg_put_cache_ids();
        return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
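/*
 * Callers normally reach __list_lru_init() through the wrappers in
 * include/linux/list_lru.h rather than directly; a sketch, assuming the
 * list_lru_init_memcg() wrapper and a registered shrinker named my_shrinker:
 *
 *        static struct list_lru my_lru;        // hypothetical
 *
 *        err = list_lru_init_memcg(&my_lru, &my_shrinker);
 *        if (err)
 *                return err;
 *        ...
 *        list_lru_destroy(&my_lru);
 *
 * list_lru_init() is the non-memcg-aware variant.
 */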

void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;

        memcg_get_cache_ids();
        list_lru_unregister(lru);
        memcg_destroy_list_lru(lru);
        kfree(lru->node);
        lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
        lru->shrinker_id = -1;
#endif
        memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);