/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;
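
/*
 * Per-CPU pagevecs: lru_add_pvecs holds one pagevec per LRU list with pages
 * waiting to be added to the LRU; lru_rotate_pvecs holds pages waiting to be
 * moved to the tail of the inactive list.
 */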
static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_page(page);
}
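
/*
 * Drop a reference on a compound page: operate on the head page and, when
 * the refcount reaches zero, free it through the compound page destructor.
 */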
static void put_compound_page(struct page *page)
{
	page = compound_head(page);
	if (put_page_testzero(page)) {
		compound_page_dtor *dtor;

		dtor = get_compound_page_dtor(page);
		(*dtor)(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
			int lru = page_is_file_cache(page);
			list_move_tail(&page->lru, &zone->lru[lru].list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

/*
 * FIXME: speed this up?
 */
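/* Move an evictable, inactive page that is on the LRU to the active list of its zone. */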
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = LRU_BASE + file;
		del_page_from_lru_list(zone, page, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);
		mem_cgroup_move_lists(page, lru);

		zone->recent_rotated[!!file]++;
		zone->recent_scanned[!!file]++;
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}

EXPORT_SYMBOL(mark_page_accessed);
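
/*
 * Queue @page for addition to the @lru list via the current CPU's
 * lru_add_pvecs pagevec.  Takes an extra page reference; the pagevec is
 * drained to the LRU when it fills up.
 */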
void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * place @page on active or unevictable LRU list, depending on
 * page_evictable().  Note that if the page is not evictable,
 * it goes directly back onto it's zone's unevictable list.  It does
 * NOT use a per cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					struct vm_area_struct *vma)
{
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
	else
		add_page_to_unevictable_list(page);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			____pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}
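
/*
 * Per-CPU work callback for lru_add_drain_all(): drain the pagevecs of
 * whichever CPU it runs on.
 */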
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}
		if (!put_page_testzero(page))
			continue;
		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

/*
 * pagevec_release() for pages which are known to not be on the LRU
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
	int i;
	struct pagevec pages_to_free;

	pagevec_init(&pages_to_free, pvec->cold);
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		VM_BUG_ON(PageLRU(page));
		if (put_page_testzero(page))
			pagevec_add(&pages_to_free, page);
	}
	pagevec_free(&pages_to_free);
	pagevec_reinit(pvec);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	int i;
	struct zone *zone = NULL;
	VM_BUG_ON(is_unevictable_lru(lru));

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);
		int file;

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(PageUnevictable(page));
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		file = is_file_lru(lru);
		zone->recent_scanned[file]++;
		if (is_active_lru(lru)) {
			SetPageActive(page);
			zone->recent_rotated[file]++;
		}
		add_page_to_lru_list(zone, page, lru);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (PagePrivate(page) && trylock_page(page)) {
			if (PagePrivate(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_swap_free - try to free swap space from the pages in a pagevec
 * @pvec: pagevec with swapcache pages to free the swap space of
 *
 * The caller needs to hold an extra reference to each page and
 * not hold the page lock on the pages.  This function uses a
 * trylock on the page lock so it may not always free the swap
 * space associated with a page.
 */
void pagevec_swap_free(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (PageSwapCache(page) && trylock_page(page)) {
			if (PageSwapCache(page))
				remove_exclusive_swap_page_ref(page);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
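
/*
 * Like pagevec_lookup(), but only returns pages which the radix tree has
 * tagged with @tag.  @index is advanced so that a subsequent call continues
 * the scan where this one left off.
 */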
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag, nr_pages,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

#ifdef CONFIG_SMP
/*
 * We tolerate a little inaccuracy to avoid ping-ponging the counter between
 * CPUs
 */
#define ACCT_THRESHOLD	max(16, NR_CPUS * 2)

static DEFINE_PER_CPU(long, committed_space);
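
/*
 * Batch vm_committed_space accounting in a per-CPU counter and fold it into
 * the global atomic only when the local delta grows beyond ACCT_THRESHOLD,
 * to avoid bouncing the shared counter between CPUs.
 */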
void vm_acct_memory(long pages)
{
	long *local;

	preempt_disable();
	local = &__get_cpu_var(committed_space);
	*local += pages;
	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
		atomic_long_add(*local, &vm_committed_space);
		*local = 0;
	}
	preempt_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	long *committed;

	committed = &per_cpu(committed_space, (long)hcpu);
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		atomic_long_add(*committed, &vm_committed_space);
		*committed = 0;
		drain_cpu_pagevecs((long)hcpu);
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif
	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system means that we
	 * _really_ don't want to cluster much more
	 */
#ifdef CONFIG_HOTPLUG_CPU
	hotcpu_notifier(cpu_swap_callback, 0);
#endif
}