Commit 9617d95e6e9ffd883cf90a89724fe60d7ab22f9a

Authored by Nick Piggin
Committed by Linus Torvalds
1 parent 224abf92b2

[PATCH] mm: rmap optimisation

Optimise rmap functions by minimising atomic operations when we know there
will be no concurrent modifications.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 4 changed files with 43 additions and 15 deletions Side-by-side Diff

mm/fremap.c
... ... @@ -324,7 +324,7 @@
324 324 lru_cache_add_active(page);
325 325 set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
326 326 page, vma->vm_page_prot))));
327   - page_add_anon_rmap(page, vma, address);
  327 + page_add_new_anon_rmap(page, vma, address);
328 328 pte_unmap_unlock(pte, ptl);
329 329  
330 330 /* no need for flush_tlb */
include/linux/rmap.h
... ... @@ -71,6 +71,7 @@
71 71 * rmap interfaces called when adding or removing pte of page
72 72 */
73 73 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
  74 +void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
74 75 void page_add_file_rmap(struct page *);
75 76 void page_remove_rmap(struct page *);
76 77  
mm/memory.c
... ... @@ -1498,7 +1498,7 @@
1498 1498 update_mmu_cache(vma, address, entry);
1499 1499 lazy_mmu_prot_update(entry);
1500 1500 lru_cache_add_active(new_page);
1501   - page_add_anon_rmap(new_page, vma, address);
  1501 + page_add_new_anon_rmap(new_page, vma, address);
1502 1502  
1503 1503 /* Free the old page.. */
1504 1504 new_page = old_page;
... ... @@ -1978,7 +1978,7 @@
1978 1978 inc_mm_counter(mm, anon_rss);
1979 1979 lru_cache_add_active(page);
1980 1980 SetPageReferenced(page);
1981   - page_add_anon_rmap(page, vma, address);
  1981 + page_add_new_anon_rmap(page, vma, address);
1982 1982 } else {
1983 1983 /* Map the ZERO_PAGE - vm_page_prot is readonly */
1984 1984 page = ZERO_PAGE(address);
... ... @@ -2109,7 +2109,7 @@
2109 2109 if (anon) {
2110 2110 inc_mm_counter(mm, anon_rss);
2111 2111 lru_cache_add_active(new_page);
2112   - page_add_anon_rmap(new_page, vma, address);
  2112 + page_add_new_anon_rmap(new_page, vma, address);
2113 2113 } else {
2114 2114 inc_mm_counter(mm, file_rss);
2115 2115 page_add_file_rmap(new_page);
mm/rmap.c
... ... @@ -435,6 +435,26 @@
435 435 }
436 436  
437 437 /**
  438 + * __page_set_anon_rmap - setup new anonymous rmap
  439 + * @page: the page to add the mapping to
  440 + * @vma: the vm area in which the mapping is added
  441 + * @address: the user virtual address mapped
  442 + */
  443 +static void __page_set_anon_rmap(struct page *page,
  444 + struct vm_area_struct *vma, unsigned long address)
  445 +{
  446 + struct anon_vma *anon_vma = vma->anon_vma;
  447 +
  448 + BUG_ON(!anon_vma);
  449 + anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
  450 + page->mapping = (struct address_space *) anon_vma;
  451 +
  452 + page->index = linear_page_index(vma, address);
  453 +
  454 + inc_page_state(nr_mapped);
  455 +}
  456 +
  457 +/**
438 458 * page_add_anon_rmap - add pte mapping to an anonymous page
439 459 * @page: the page to add the mapping to
440 460 * @vma: the vm area in which the mapping is added
441 461  
... ... @@ -445,18 +465,25 @@
445 465 void page_add_anon_rmap(struct page *page,
446 466 struct vm_area_struct *vma, unsigned long address)
447 467 {
448   - if (atomic_inc_and_test(&page->_mapcount)) {
449   - struct anon_vma *anon_vma = vma->anon_vma;
450   -
451   - BUG_ON(!anon_vma);
452   - anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
453   - page->mapping = (struct address_space *) anon_vma;
454   -
455   - page->index = linear_page_index(vma, address);
456   -
457   - inc_page_state(nr_mapped);
458   - }
  468 + if (atomic_inc_and_test(&page->_mapcount))
  469 + __page_set_anon_rmap(page, vma, address);
459 470 /* else checking page index and mapping is racy */
  471 +}
  472 +
  473 +/**
  474 + * page_add_new_anon_rmap - add pte mapping to a new anonymous page
  475 + * @page: the page to add the mapping to
  476 + * @vma: the vm area in which the mapping is added
  477 + * @address: the user virtual address mapped
  478 + *
  479 + * Same as page_add_anon_rmap but must only be called on *new* pages.
  480 + * This means the inc-and-test can be bypassed.
  481 + */
  482 +void page_add_new_anon_rmap(struct page *page,
  483 + struct vm_area_struct *vma, unsigned long address)
  484 +{
  485 + atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
  486 + __page_set_anon_rmap(page, vma, address);
460 487 }
461 488  
462 489 /**