Commit 746b18d421da7f27e948e8af1ad82b6d0309324d

Authored by Peter Zijlstra
Committed by Linus Torvalds
1 parent 6111e4ca68

mm: use refcounts for page_lock_anon_vma()

Convert page_lock_anon_vma() over to use refcounts.  This is done to
prepare for the conversion of anon_vma from spinlock to mutex.

Sadly this increases the cost of page_lock_anon_vma() from one to two
atomics; a follow-up patch addresses this, so let's keep things simple for now.
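
The core of the change is the lookup-side pattern: under rcu_read_lock(), a
reference may only be taken with atomic_inc_not_zero(), so a lookup can never
revive an anon_vma whose last reference is already gone (SLAB_DESTROY_BY_RCU
only guarantees the memory remains an anon_vma, not that it is still the same
one).  A minimal userspace sketch of that primitive follows; it is illustrative
only, not kernel code, and the struct and helper names are invented for the
example:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct obj {
		atomic_int refcount;	/* assumed layout for this sketch */
	};

	/* Take a reference only if somebody else still holds one. */
	static bool obj_get_not_zero(struct obj *o)
	{
		int old = atomic_load(&o->refcount);

		while (old != 0) {
			/* on failure, 'old' is reloaded and the loop retries */
			if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
				return true;
		}
		return false;		/* already zero: object is being freed */
	}

	/* Drop a reference; the last put is where freeing would happen. */
	static void obj_put(struct obj *o)
	{
		if (atomic_fetch_sub(&o->refcount, 1) == 1) {
			/* last reference gone: safe to free the object */
		}
	}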

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 2 changed files with 31 additions and 28 deletions

@@ -721,15 +721,11 @@
 		 * Only page_lock_anon_vma() understands the subtleties of
 		 * getting a hold on an anon_vma from outside one of its mms.
 		 */
-		anon_vma = page_lock_anon_vma(page);
+		anon_vma = page_get_anon_vma(page);
 		if (anon_vma) {
 			/*
-			 * Take a reference count on the anon_vma if the
-			 * page is mapped so that it is guaranteed to
-			 * exist when the page is remapped later
+			 * Anon page
 			 */
-			get_anon_vma(anon_vma);
-			page_unlock_anon_vma(anon_vma);
 		} else if (PageSwapCache(page)) {
 			/*
 			 * We cannot be sure that the anon_vma of an unmapped
@@ -857,13 +853,8 @@
 		lock_page(hpage);
 	}
 
-	if (PageAnon(hpage)) {
-		anon_vma = page_lock_anon_vma(hpage);
-		if (anon_vma) {
-			get_anon_vma(anon_vma);
-			page_unlock_anon_vma(anon_vma);
-		}
-	}
+	if (PageAnon(hpage))
+		anon_vma = page_get_anon_vma(hpage);
 
 	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
@@ -337,9 +337,9 @@
  * that the anon_vma pointer from page->mapping is valid if there is a
  * mapcount, we can dereference the anon_vma after observing those.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_get_anon_vma(struct page *page)
 {
-	struct anon_vma *anon_vma, *root_anon_vma;
+	struct anon_vma *anon_vma = NULL;
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
@@ -350,30 +350,42 @@
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-	root_anon_vma = ACCESS_ONCE(anon_vma->root);
-	spin_lock(&root_anon_vma->lock);
+	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
+		anon_vma = NULL;
+		goto out;
+	}
 
 	/*
 	 * If this page is still mapped, then its anon_vma cannot have been
-	 * freed. But if it has been unmapped, we have no security against
-	 * the anon_vma structure being freed and reused (for another anon_vma:
-	 * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
-	 * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
-	 * anon_vma->root before page_unlock_anon_vma() is called to unlock.
+	 * freed. But if it has been unmapped, we have no security against the
+	 * anon_vma structure being freed and reused (for another anon_vma:
+	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
+	 * above cannot corrupt).
 	 */
-	if (page_mapped(page))
-		return anon_vma;
-
-	spin_unlock(&root_anon_vma->lock);
+	if (!page_mapped(page)) {
+		put_anon_vma(anon_vma);
+		anon_vma = NULL;
+	}
 out:
 	rcu_read_unlock();
-	return NULL;
+
+	return anon_vma;
 }
 
+struct anon_vma *page_lock_anon_vma(struct page *page)
+{
+	struct anon_vma *anon_vma = page_get_anon_vma(page);
+
+	if (anon_vma)
+		anon_vma_lock(anon_vma);
+
+	return anon_vma;
+}
+
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
 {
 	anon_vma_unlock(anon_vma);
-	rcu_read_unlock();
+	put_anon_vma(anon_vma);
 }
 
 /*
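
After this patch, callers that only need to pin an anon_vma across a sleepable
section take a plain reference, while callers that also need the lock keep the
old page_lock_anon_vma()/page_unlock_anon_vma() pairing, which now takes and
drops a reference under the hood.  A hedged sketch of the two calling patterns
(not a standalone compilable unit; error paths trimmed):

	/* Pin only: keep the anon_vma alive, e.g. across migration/unmap. */
	anon_vma = page_get_anon_vma(page);
	if (anon_vma) {
		/* ... page may be unmapped and remapped here ... */
		put_anon_vma(anon_vma);		/* drop the pin when done */
	}

	/* Pin + lock: the old interface, now backed by the refcount. */
	anon_vma = page_lock_anon_vma(page);
	if (anon_vma) {
		/* ... walk the mappings under the anon_vma lock ... */
		page_unlock_anon_vma(anon_vma);	/* unlocks and drops the reference */
	}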