Blame view
mm/list_lru.c
12.6 KB
a38e40824 list: add a new L... |
1 2 3 4 5 6 7 8 |
/* * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved. * Authors: David Chinner and Glauber Costa * * Generic LRU infrastructure */ #include <linux/kernel.h> #include <linux/module.h> |
3b1d58a4c list_lru: per-nod... |
9 |
#include <linux/mm.h> |
a38e40824 list: add a new L... |
10 |
#include <linux/list_lru.h> |
5ca302c8e list_lru: dynamic... |
11 |
#include <linux/slab.h> |
c0a5b5609 list_lru: organiz... |
12 |
#include <linux/mutex.h> |
60d3fd32a list_lru: introdu... |
13 |
#include <linux/memcontrol.h> |
c0a5b5609 list_lru: organiz... |
14 |
|
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Global registry of all list_lrus, protected by list_lrus_mutex.  The
 * memcg code walks this list to resize or drain the per-cgroup arrays of
 * every LRU when cgroups come and go (see memcg_update_all_list_lrus()
 * and memcg_drain_all_list_lrus() below).
 */
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

/* Add @lru to the global registry; called from __list_lru_init(). */
static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

/* Remove @lru from the global registry; called from list_lru_destroy(). */
static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
/* Without kmem cgroup accounting there is nothing to track globally. */
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
a38e40824 list: add a new L... |
41 |
|
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * An lru is memcg aware iff its per-cgroup array was allocated at init
 * time (memcg_init_list_lru() allocates it for every node or none).
 */
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to be always present, even
	 * in the systems supporting sparse numa ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

/*
 * Return the per-cgroup list for @idx on @nlru, or the node-global list
 * when the lru is not memcg aware or @idx is negative (i.e. no cgroup).
 */
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

/*
 * Look up the memcg that a kmem-allocated object is charged to, via its
 * (head) page.  NULL when kmem accounting is disabled or not charged.
 */
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

/* Resolve the list an object @ptr should live on within @nlru. */
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
/* !CONFIG_MEMCG (or SLOB): only the node-global lists exist. */
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
60d3fd32a list_lru: introdu... |
106 |
|
/*
 * list_lru_add - add an element to the lru list of the right node/memcg
 * @lru:  the lru the element belongs to
 * @item: the list_head embedded in the object
 *
 * The target node is derived from the page backing @item, the target
 * memcg list (if any) from the object's kmem charge.  Returns true if
 * the item was added, false if it was already on a list (non-empty).
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		/* must be resolved under the lock (array may be relocated) */
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		/* both the per-list and the per-node counters are kept */
		l->nr_items++;
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

/*
 * list_lru_del - remove an element from its lru list
 * @lru:  the lru the element belongs to
 * @item: the list_head embedded in the object
 *
 * Returns true if the item was removed, false if it was not on a list.
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
/*
 * list_lru_isolate - unlink an item from within a walk callback
 * @list: the list_lru_one the item currently sits on
 * @item: the element being isolated
 *
 * Runs with the node's lru lock held (the walk holds it across the
 * callback).  The entry is re-initialised, so a subsequent
 * list_lru_add() will see it as empty.
 */
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list->nr_items--;
	list_del_init(item);
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

/*
 * list_lru_isolate_move - move an item onto a private list in a callback
 * @list: the list_lru_one the item currently sits on
 * @item: the element being isolated
 * @head: destination list supplied by the caller
 *
 * Like list_lru_isolate(), but splices the item onto @head instead of
 * leaving it unlinked.
 */
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list->nr_items--;
	list_move(item, head);
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
60d3fd32a list_lru: introdu... |
160 161 |
static unsigned long __list_lru_count_one(struct list_lru *lru, int nid, int memcg_idx) |
a38e40824 list: add a new L... |
162 |
{ |
6a4f496fd list_lru: per-nod... |
163 |
struct list_lru_node *nlru = &lru->node[nid]; |
60d3fd32a list_lru: introdu... |
164 165 |
struct list_lru_one *l; unsigned long count; |
3b1d58a4c list_lru: per-nod... |
166 |
|
6a4f496fd list_lru: per-nod... |
167 |
spin_lock(&nlru->lock); |
60d3fd32a list_lru: introdu... |
168 |
l = list_lru_from_memcg_idx(nlru, memcg_idx); |
60d3fd32a list_lru: introdu... |
169 |
count = l->nr_items; |
6a4f496fd list_lru: per-nod... |
170 |
spin_unlock(&nlru->lock); |
3b1d58a4c list_lru: per-nod... |
171 172 173 |
return count; } |
60d3fd32a list_lru: introdu... |
174 175 176 177 178 179 180 181 182 183 |
unsigned long list_lru_count_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg) { return __list_lru_count_one(lru, nid, memcg_cache_id(memcg)); } EXPORT_SYMBOL_GPL(list_lru_count_one); unsigned long list_lru_count_node(struct list_lru *lru, int nid) { |
2c80cd57c mm/list_lru.c: fi... |
184 |
struct list_lru_node *nlru; |
60d3fd32a list_lru: introdu... |
185 |
|
2c80cd57c mm/list_lru.c: fi... |
186 187 |
nlru = &lru->node[nid]; return nlru->nr_items; |
60d3fd32a list_lru: introdu... |
188 |
} |
6a4f496fd list_lru: per-nod... |
189 |
EXPORT_SYMBOL_GPL(list_lru_count_node); |
3b1d58a4c list_lru: per-nod... |
190 |
|
/*
 * Walk one (node, memcg) list, invoking @isolate on each item under the
 * node lock.  @nr_to_walk is decremented per visited item and bounds the
 * total work; returns the number of items the callback removed.
 */
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			/* the callback dropped the lock; must hold it again */
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			/* keep the per-node aggregate counter in sync */
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			/* demote to the tail; revisit later */
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

/*
 * list_lru_walk_one - walk the list of a single memcg on a single node
 *
 * Thin wrapper mapping @memcg to its cache id; see __list_lru_walk_one()
 * for the walk semantics.
 */
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

/*
 * list_lru_walk_node - walk every list on one node
 *
 * First walks the node-global list (memcg index -1), then, if the lru is
 * memcg aware and budget remains, each per-memcg list in turn until
 * @nr_to_walk is exhausted.
 */
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
						isolate, cb_arg, nr_to_walk);
			/* nr_to_walk is unsigned: <= 0 means exhausted */
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
60d3fd32a list_lru: introdu... |
282 283 284 285 286 |
static void init_one_lru(struct list_lru_one *l) { INIT_LIST_HEAD(&l->list); l->nr_items = 0; } |
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/* Free the per-cgroup lists in the half-open slot range [begin, end). */
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

/*
 * Allocate and initialise the per-cgroup lists for slots [begin, end).
 * On failure everything allocated so far is rolled back and -ENOMEM is
 * returned.
 */
static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/*
	 * Slots [begin, i) were successfully allocated; since the destroy
	 * helper frees the half-open range [begin, end), the upper bound
	 * must be i.  Passing i - 1 here would leak lru[i - 1].
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

/* Allocate one node's per-cgroup array sized for the current cache ids. */
static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kvmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kvfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

/* Free one node's per-cgroup lists and the array holding them. */
static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(nlru->memcg_lrus);
}

/*
 * Grow one node's per-cgroup array from @old_size to @new_size slots,
 * publishing the new array under the node lock so that readers (see
 * list_lru_from_memcg_idx) never observe a stale pointer.
 */
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kvmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kvfree(old);
	return 0;
}

/* Undo a partial update by freeing only the newly added slots. */
static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

/*
 * Make @lru memcg aware by allocating per-cgroup arrays on every node.
 * On failure, nodes initialised so far are torn down again.
 */
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	/* node ids may be sparse: skip nodes that were never initialised */
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

/* Tear down all per-cgroup state of a memcg-aware lru. */
static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

/* Grow the per-cgroup arrays of @lru on all nodes; roll back on failure. */
static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

/* Roll back a partially applied memcg_update_list_lru(). */
static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

/*
 * memcg_update_all_list_lrus - resize every registered lru for @new_size
 * cache ids.  Called by the memcg code when the id space grows.  On
 * failure, all lrus updated so far are reverted before returning.
 */
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}
/*
 * Move everything on the @src_idx list of @nlru onto the @dst_idx list,
 * carrying the item count along.  Used when a cgroup is offlined and its
 * remaining objects are reparented.
 */
static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

/* Drain @src_idx into @dst_idx on every node of a memcg-aware lru. */
static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

/*
 * memcg_drain_all_list_lrus - reparent the contents of cgroup @src_idx
 * to @dst_idx on every registered lru.
 */
void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}
#else
/* Without kmem cgroup accounting these are no-ops. */
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
/*
 * __list_lru_init - initialise an lru with one list_lru_node per node
 * @lru:         the lru to set up
 * @memcg_aware: also allocate per-cgroup lists on every node
 * @key:         optional lockdep class for the per-node spinlocks
 *
 * Returns 0 on success or -ENOMEM; on failure lru->node is left NULL so
 * a subsequent list_lru_destroy() is safe.  The cache-id count is pinned
 * (memcg_get_cache_ids) across the allocation so the per-cgroup arrays
 * cannot be resized concurrently.
 */
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
/*
 * list_lru_destroy - tear down an lru created by __list_lru_init()
 *
 * Safe to call on an lru that was never initialised or was already
 * destroyed (lru->node == NULL in both cases).  The lru is unregistered
 * before its per-cgroup state is freed, with cache ids pinned so no
 * concurrent resize touches it mid-teardown.
 */
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	/* mark as destroyed so a second call is a no-op */
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);