mm/workingset.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
 *
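 * A worked example with made-up numbers: suppose NR_inactive is 1000
 * and NR_active is 600. A page is evicted at a counter reading of
 * E = 50000 and refaults at R = 50400. Its refault distance is 400:
 * had the inactive list been 400 slots bigger, it would not have been
 * evicted. Since 400 <= NR_active, the page is activated on refault;
 * with R = 50700 instead, the distance of 700 would exceed NR_active
 * and the page would start out inactive again.
 *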
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained (node->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
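
/*
 * An illustrative bit budget (the widths are config-dependent, so the
 * numbers are an example only): on 64-bit, BITS_PER_XA_VALUE is 63,
 * i.e. one bit is lost to the xarray value tag. With NODES_SHIFT == 10
 * and MEM_CGROUP_ID_SHIFT == 16, EVICTION_SHIFT is 1 + 1 + 10 + 16 = 28,
 * which leaves 64 - 28 = 36 eviction timestamp bits per shadow entry.
 */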

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;
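
/*
 * For example, with a (hypothetical) bucket_order of 2, timestamps are
 * stored in units of four eviction/activation events: refault distances
 * are resolved only to a granularity of four slots, the price paid for
 * covering a four times larger distance range in the same bits.
 */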

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}
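
/*
 * The resulting entry layout, low bits first (field widths are
 * config-dependent; shown here for illustration only):
 *
 *	bit 0				- PageWorkingset flag at eviction
 *	next NODES_SHIFT bits		- node id
 *	next MEM_CGROUP_ID_SHIFT bits	- memcg id
 *	remaining value bits		- bucketed eviction timestamp
 *
 * unpack_shadow() peels the fields back off in reverse order.
 */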

static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		atomic_long_inc(&lruvec->inactive_age);
	} while (memcg && (memcg = parent_mem_cgroup(memcg)));
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = page_pgdat(page);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	advance_inactive_age(page_memcg(page), pgdat);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->inactive_age);
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct page *page, void *shadow)
{
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	struct pglist_data *pgdat;
	unsigned long active_file;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->inactive_age);
	active_file = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * inactive_age to lap a shadow entry in the field, which can
	 * then result in a false small refault distance, leading to a
	 * false activation should this old entry actually refault
	 * again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;
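
	/*
	 * An illustrative case of the wraparound arithmetic, assuming
	 * 36 timestamp bits and made-up readings: an eviction snapshot
	 * taken at counter value 2^36 - 10 is stored as 2^36 - 10, and
	 * a refault reading of 2^36 + 20 yields
	 * (refault - eviction) & EVICTION_MASK == 30, the true distance,
	 * even though the stored timestamp space has wrapped.
	 */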

	/*
	 * The activation decision for this page is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during page reclaim is being determined.
	 *
	 * However, the cgroup that will own the page is the one that
	 * is actually experiencing the refault event.
	 */
	memcg = page_memcg(page);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't act on pages that couldn't stay resident even if all
	 * the memory was available to the page cache.
	 */
	if (refault_distance > active_file)
		goto out;

	SetPageActive(page);
	advance_inactive_age(memcg, pgdat);
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	advance_inactive_age(memcg, page_pgdat(page));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	}
}
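
/*
 * For instance (illustrative): an xa_node whose slots hold three shadow
 * values and no pages has node->count == node->nr_values == 3 and is
 * kept on shadow_nodes; as soon as a page pointer is inserted,
 * node->count exceeds node->nr_values and the node is delisted.
 */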

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE);
		pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE);
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);
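
	/*
	 * Spelling out the estimate above (illustrative: 64-bit, 4K
	 * pages, XA_CHUNK_SHIFT == 6): max_nodes is pages / 8, and at
	 * ~7 nodes per page those nodes occupy at most
	 * (pages / 8) * (PAGE_SIZE / 7) = pages * PAGE_SIZE / 56 bytes,
	 * i.e. roughly 1.8% of the memory they describe.
	 */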

	if (!nodes)
		return SHRINK_EMPTY;

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	XA_STATE(xas, node->array, 0);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */
	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_slab_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	mapping->nrexceptional -= node->nr_values;
	xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
	xas.xa_offset = node->offset;
	xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
	xas_set_update(&xas, workingset_update_node);
	/*
	 * We could store a shadow entry here which was the minimum of the
	 * shadow entries we were tracking ...
	 */
	xas_store(&xas, NULL);
	__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
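	/*
	 * Illustration, assuming the 36 timestamp bits from the earlier
	 * example: max_order only exceeds timestamp_bits once the
	 * machine has more than 2^36 pages of RAM - 256 TiB with 4K
	 * pages - so bucket_order typically stays 0 on 64-bit kernels.
	 */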
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);