/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};
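
/*
 * One swapper address_space per swap type: swap_address_space(entry)
 * resolves to swapper_spaces[swp_type(entry)], so pages belonging to
 * different swap files/partitions live in separate radix trees and
 * contend on separate tree_locks.
 */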
struct address_space swapper_spaces[MAX_SWAPFILES] = {
        [0 ... MAX_SWAPFILES - 1] = {
                .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
                .i_mmap_writable = ATOMIC_INIT(0),
                .a_ops          = &swap_aops,
        }
};

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
        int i;
        unsigned long ret = 0;

        for (i = 0; i < MAX_SWAPFILES; i++)
                ret += swapper_spaces[i].nrpages;
        return ret;
}
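
/*
 * Counts swap-cache hits on pages marked PageReadahead; consumed by
 * swapin_nr_pages() to scale the next readahead window.  Seeding it
 * with 4 biases the very first window towards readahead: 4 hits + 2
 * rounds up to 8 pages (subject to the page_cluster cap) before any
 * real hit has been recorded.
 */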
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        get_page(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        error = radix_tree_insert(&address_space->page_tree,
                                  entry.val, page);
        if (likely(!error)) {
                address_space->nrpages++;
                __inc_node_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&address_space->tree_lock);

        if (unlikely(error)) {
                /*
                 * Only a context which has already set the SWAP_HAS_CACHE
                 * flag calls add_to_swap_cache(), so it never returns
                 * -EEXIST here.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                put_page(page);
        }

        return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}
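
/*
 * Note on the split above: add_to_swap_cache() calls
 * radix_tree_maybe_preload() while it is still allowed to sleep, so
 * that __add_to_swap_cache() can insert into the radix tree under the
 * IRQ-disabled tree_lock without needing to allocate.  Callers that
 * have already preloaded (see __read_swap_cache_async() below) call
 * __add_to_swap_cache() directly.
 */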

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        radix_tree_delete(&address_space->page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        address_space->nrpages--;
        __dec_node_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list the tail pages are placed on if @page is a transparent
 *        huge page and has to be split (see split_huge_page_to_list())
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (mem_cgroup_try_charge_swap(page, entry)) {
                swapcache_free(entry);
                return 0;
        }

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry);
                        return 0;
                }

        /*
         * Add it to the swap cache.
         *
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can
                 * safely clear the SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
                return 0;
        }
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        swapcache_free(entry);
        put_page(page);
}
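
/*
 * Reference counting note: __add_to_swap_cache() takes a page reference
 * with get_page() when the page enters the swap cache, and
 * delete_from_swap_cache() drops it with put_page() once the page has
 * been removed from the radix tree, mirroring the page-cache rule that
 * the mapping holds one reference on every cached page.
 */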

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (is_huge_zero_page(page))
                put_huge_zero_page();
        else
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), entry.val);

        if (page) {
                INC_CACHE_INFO(find_success);
                if (TestClearPageReadahead(page))
                        atomic_inc(&swapin_readahead_hits);
        }

        INC_CACHE_INFO(find_total);
        return page;
}
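
/*
 * The TestClearPageReadahead() above closes the readahead feedback
 * loop: swapin_readahead() tags every speculatively read page with
 * PageReadahead, and each tag cleared on a real lookup hit bumps
 * swapin_readahead_hits, which swapin_nr_pages() then uses to grow or
 * shrink the next readahead window.
 */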

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct page *found_page, *new_page = NULL;
        struct address_space *swapper_space = swap_address_space(entry);
        int err;

        *new_page_allocated = false;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet, while
                         * the other end is scheduled away waiting on discard
                         * I/O completion at scan_swap_map().
                         *
                         * In order to avoid turning this transitory state
                         * into a permanent loop around this -EEXIST case
                         * if !CONFIG_PREEMPT and the I/O completion happens
                         * to be waiting on the CPU waitqueue where we are now
                         * busy looping, we just conditionally invoke the
                         * scheduler here, if there are some more important
                         * tasks to run.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __SetPageLocked(new_page);
                __SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        *new_page_allocated = true;
                        return new_page;
                }
                radix_tree_preload_end();
                __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can
                 * safely clear the SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                put_page(new_page);
        return found_page;
}
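
/*
 * The *new_page_allocated out-parameter lets a caller distinguish "page
 * was already in the swap cache" from "a fresh locked page was inserted
 * and still needs its data read in".  read_swap_cache_async() below
 * uses it to decide whether to start I/O; other callers (e.g. zswap's
 * writeback path, for which this helper was factored out) start their
 * own I/O instead.
 */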

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage);

        return retpage;
}
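
/*
 * Note that swap_readpage() only queues the read: the page is returned
 * locked and not yet up-to-date, and is unlocked by the bio completion
 * handler once the data has arrived.  A caller that needs the contents
 * must lock or wait on the page before using it.
 */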

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int pages, max_pages, last_ra;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
                prev_offset = offset;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = atomic_read(&last_readahead_pages) / 2;
        if (pages < last_ra)
                pages = last_ra;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}
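
/*
 * Worked example of the heuristic above, assuming the default
 * page_cluster of 3 (max_pages = 8): five readahead hits since the
 * last fault give pages = 5 + 2 = 7, rounded up to the next power of
 * two = 8, within the cap.  With no hits at a non-adjacent offset,
 * pages would drop to 1, but if last_readahead_pages was 8 the
 * "don't shrink too fast" rule lifts it back to 8 / 2 = 4.
 */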

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct blk_plug plug;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        continue;
                if (offset != entry_offset)
                        SetPageReadahead(page);
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
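
/*
 * Worked example of the windowing above: for a fault at swap offset 21
 * with swapin_nr_pages() returning 8, mask = 7, so start_offset =
 * 21 & ~7 = 16 and end_offset = 21 | 7 = 23, and offsets 16..23 are
 * queued under one block plug.  The target page (offset 21) is read in
 * the loop without PageReadahead set; the final read_swap_cache_async()
 * after "skip:" then finds it already in the swap cache and returns it.
 */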