mm/swap.c

/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 *
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;
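
/*
 * Per-CPU pagevecs batch LRU additions so that zone->lru_lock is taken
 * once per pagevec rather than once per page; lru_rotate_pvecs holds
 * pages waiting to be moved to the tail of the inactive list.
 */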
static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_page(page);
}
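
/*
 * Compound pages are freed through their destructor, so that the whole
 * (possibly higher-order) unit is torn down together.
 */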
static void put_compound_page(struct page *page)
{
	page = compound_head(page);
	if (put_page_testzero(page)) {
		compound_page_dtor *dtor;

		dtor = get_compound_page_dtor(page);
		(*dtor)(page);
	}
}

/* Drop a reference to @page and free it once the refcount reaches zero. */
void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page)) {
			list_move_tail(&page->lru, &zone->inactive_list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

/*
 * Move @page to the active list of its zone.
 *
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(zone, page);
		SetPageActive(page);
		add_page_to_active_list(zone, page);
		__count_vm_event(PGACTIVATE);
		mem_cgroup_move_lists(page, true);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}

EXPORT_SYMBOL(mark_page_accessed);

/**
 * lru_cache_add - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvecs);
}
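
/* Like lru_cache_add(), but the page is destined for the active list. */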
void lru_cache_add_active(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add_active(pvec);
	put_cpu_var(lru_add_active_pvecs);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvec;

	pvec = &per_cpu(lru_add_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_add_active_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add_active(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

/* Drain the calling CPU's pagevecs. */
void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

#ifdef CONFIG_NUMA
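/* Work function: drain the pagevecs of whichever CPU it runs on. */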
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

#else

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	lru_add_drain();
	return 0;
}
#endif

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}
		if (!put_page_testzero(page))
			continue;
		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);
			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

/*
 * pagevec_release() for pages which are known to not be on the LRU
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
	int i;
	struct pagevec pages_to_free;

	pagevec_init(&pages_to_free, pvec->cold);
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		VM_BUG_ON(PageLRU(page));
		if (put_page_testzero(page))
			pagevec_add(&pages_to_free, page);
	}
	pagevec_free(&pages_to_free);
	pagevec_reinit(pvec);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		add_page_to_inactive_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_lru_add);

/* Like __pagevec_lru_add(), but the pages go onto the active list. */
void __pagevec_lru_add_active(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		add_page_to_active_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (PagePrivate(page) && !TestSetPageLocked(page)) {
			if (PagePrivate(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
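
/*
 * Like pagevec_lookup(), but only pages tagged with @tag in the radix
 * tree (e.g. PAGECACHE_TAG_DIRTY) are returned; @index is advanced past
 * the last page found.
 */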
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

#ifdef CONFIG_SMP
/*
 * We tolerate a little inaccuracy to avoid ping-ponging the counter between
 * CPUs
 */
#define ACCT_THRESHOLD	max(16, NR_CPUS * 2)

static DEFINE_PER_CPU(long, committed_space) = 0;

/*
 * Accumulate committed-space changes in a per-CPU counter and fold them
 * into the global vm_committed_space only once the local delta grows
 * beyond ACCT_THRESHOLD.
 */
void vm_acct_memory(long pages)
{
	long *local;

	preempt_disable();
	local = &__get_cpu_var(committed_space);
	*local += pages;
	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
		atomic_long_add(*local, &vm_committed_space);
		*local = 0;
	}
	preempt_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	long *committed;

	committed = &per_cpu(committed_space, (long)hcpu);
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		atomic_long_add(*committed, &vm_committed_space);
		*committed = 0;
		drain_cpu_pagevecs((long)hcpu);
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	hotcpu_notifier(cpu_swap_callback, 0);
#endif
}