Commit a2c43eed8334e878702fca713b212ae2a11d84b9

Authored by Hugh Dickins
Committed by Linus Torvalds
1 parent 7b1fe59793

mm: try_to_free_swap replaces remove_exclusive_swap_page

remove_exclusive_swap_page(): its problem is in living up to its name.

It doesn't matter if someone else has a reference to the page (raised
page_count); it doesn't matter if the page is mapped into userspace
(raised page_mapcount - though that hints it may be worth keeping the
swap): all that matters is that there be no more references to the swap
(and no writeback in progress).
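
In code terms, the heart of the change is this swap of tests (a condensed
before/after sketch drawn from the mm/swapfile.c diff below, not a complete
function):

	/* old, remove_exclusive_swap_page_count(): every reference to the
	 * page must be accounted for before the swap can be freed */
	if (page_count(page) != count)	/* us + cache + ptes */
		return 0;

	/* new, try_to_free_swap(): only references to the swap matter */
	if (page_swapcount(page))
		return 0;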

swapoff (try_to_unuse) has been removing pages from swapcache for years,
with no concern for page count or page mapcount, and we used to have a
comment in lookup_swap_cache() recognizing that: if you go for a page of
swapcache, you'll get the right page, but it could have been removed from
swapcache by the time you get page lock.

So, give up asking for exclusivity: get rid of
remove_exclusive_swap_page(), and remove_exclusive_swap_page_ref() and
remove_exclusive_swap_page_count() which were spawned for the recent LRU
work: replace them by the simpler try_to_free_swap() which just checks
page_swapcount().
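
Callers keep the familiar pattern: check PageSwapCache, take the page lock
(try_to_free_swap() has a VM_BUG_ON(!PageLocked(page)) and rechecks under
the lock), then unlock; for example, free_swap_cache() in the diff below:

	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}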

Similarly, remove the page_count limitation from free_swap_and_cache(),
but assume that it's worth holding on to the swap if page is mapped and
swap nowhere near full.  Add a vm_swap_full() test in free_swap_cache()?
It would be consistent, but I think we probably have enough for now.
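
So the freeing test in free_swap_and_cache() (see the mm/swapfile.c diff
below) ends up as:

	if (PageSwapCache(page) && !PageWriteback(page) &&
	    (!page_mapped(page) || vm_swap_full())) {
		delete_from_swap_cache(page);
		SetPageDirty(page);
	}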

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 7 changed files with 22 additions and 75 deletions

include/linux/swap.h
@@ -305,8 +305,7 @@
 extern sector_t swapdev_block(int, pgoff_t);
 extern struct swap_info_struct *get_swap_info_struct(unsigned);
 extern int reuse_swap_page(struct page *);
-extern int remove_exclusive_swap_page(struct page *);
-extern int remove_exclusive_swap_page_ref(struct page *);
+extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
 /* linux/mm/thrash.c */
@@ -388,12 +387,7 @@
 
 #define reuse_swap_page(page)	(page_mapcount(page) == 1)
 
-static inline int remove_exclusive_swap_page(struct page *p)
-{
-	return 0;
-}
-
-static inline int remove_exclusive_swap_page_ref(struct page *page)
+static inline int try_to_free_swap(struct page *page)
 {
 	return 0;
 }
mm/memory.c
@@ -2403,7 +2403,7 @@
 
 	swap_free(entry);
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
-		remove_exclusive_swap_page(page);
+		try_to_free_swap(page);
 	unlock_page(page);
 
 	if (write_access) {
mm/page_io.c
@@ -98,7 +98,7 @@
 	struct bio *bio;
 	int ret = 0, rw = WRITE;
 
-	if (remove_exclusive_swap_page(page)) {
+	if (try_to_free_swap(page)) {
 		unlock_page(page);
 		goto out;
 	}
mm/swap.c
@@ -454,8 +454,7 @@
 		struct page *page = pvec->pages[i];
 
 		if (PageSwapCache(page) && trylock_page(page)) {
-			if (PageSwapCache(page))
-				remove_exclusive_swap_page_ref(page);
+			try_to_free_swap(page);
 			unlock_page(page);
 		}
 	}
mm/swap_state.c
@@ -195,14 +195,14 @@
  * If we are the only user, then try to free up the swap cache.
  *
  * Its ok to check for PageSwapCache without the page lock
- * here because we are going to recheck again inside
- * exclusive_swap_page() _with_ the lock.
+ * here because we are going to recheck again inside
+ * try_to_free_swap() _with_ the lock.
  * - Marcelo
  */
 static inline void free_swap_cache(struct page *page)
 {
-	if (PageSwapCache(page) && trylock_page(page)) {
-		remove_exclusive_swap_page(page);
+	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
+		try_to_free_swap(page);
 		unlock_page(page);
 	}
 }
mm/swapfile.c
@@ -348,71 +348,26 @@
 }
 
 /*
- * Work out if there are any other processes sharing this
- * swap cache page. Free it if you can. Return success.
+ * If swap is getting full, or if there are no more mappings of this page,
+ * then try_to_free_swap is called to free its swap space.
  */
-static int remove_exclusive_swap_page_count(struct page *page, int count)
+int try_to_free_swap(struct page *page)
 {
-	int retval;
-	struct swap_info_struct * p;
-	swp_entry_t entry;
-
 	VM_BUG_ON(!PageLocked(page));
 
 	if (!PageSwapCache(page))
 		return 0;
 	if (PageWriteback(page))
 		return 0;
-	if (page_count(page) != count) /* us + cache + ptes */
+	if (page_swapcount(page))
 		return 0;
 
-	entry.val = page_private(page);
-	p = swap_info_get(entry);
-	if (!p)
-		return 0;
-
-	/* Is the only swap cache user the cache itself? */
-	retval = 0;
-	if (p->swap_map[swp_offset(entry)] == 1) {
-		/* Recheck the page count with the swapcache lock held.. */
-		spin_lock_irq(&swapper_space.tree_lock);
-		if ((page_count(page) == count) && !PageWriteback(page)) {
-			__delete_from_swap_cache(page);
-			SetPageDirty(page);
-			retval = 1;
-		}
-		spin_unlock_irq(&swapper_space.tree_lock);
-	}
-	spin_unlock(&swap_lock);
-
-	if (retval) {
-		swap_free(entry);
-		page_cache_release(page);
-	}
-
-	return retval;
+	delete_from_swap_cache(page);
+	SetPageDirty(page);
+	return 1;
 }
 
 /*
- * Most of the time the page should have two references: one for the
- * process and one for the swap cache.
- */
-int remove_exclusive_swap_page(struct page *page)
-{
-	return remove_exclusive_swap_page_count(page, 2);
-}
-
-/*
- * The pageout code holds an extra reference to the page. That raises
- * the reference count to test for to 2 for a page that is only in the
- * swap cache plus 1 for each process that maps the page.
- */
-int remove_exclusive_swap_page_ref(struct page *page)
-{
-	return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page));
-}
-
-/*
  * Free the swap entry like above, but also try to
  * free the page cache entry if it is the last user.
  */
 
@@ -436,13 +391,12 @@
 		spin_unlock(&swap_lock);
 	}
 	if (page) {
-		int one_user;
-
-		one_user = (page_count(page) == 2);
-		/* Only cache user (+us), or swap space full? Free it! */
-		/* Also recheck PageSwapCache after page is locked (above) */
+		/*
+		 * Not mapped elsewhere, or swap space full? Free it!
+		 * Also recheck PageSwapCache now page is locked (above).
+		 */
 		if (PageSwapCache(page) && !PageWriteback(page) &&
-		    (one_user || vm_swap_full())) {
+		    (!page_mapped(page) || vm_swap_full())) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
mm/vmscan.c
@@ -759,7 +759,7 @@
 activate_locked:
 		/* Not a candidate for swapping, so reclaim swap space. */
 		if (PageSwapCache(page) && vm_swap_full())
-			remove_exclusive_swap_page_ref(page);
+			try_to_free_swap(page);
 		VM_BUG_ON(PageActive(page));
 		SetPageActive(page);
 		pgactivate++;