Commit 051ac83adf69eea4f57a97356e4282e395a5fa6d

Authored by Joonsoo Kim
Committed by Linus Torvalds
1 parent faecd8dd85

mm/rmap: make rmap_walk to get the rmap_walk_control argument

Each rmap traversal case differs slightly, so we need function pointers
and arguments to them in order to handle these differences.

For this purpose, struct rmap_walk_control is introduced in this patch,
and it will be extended in a following patch.  Introducing and extending
are kept separate because it clarifies the changes.
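
As a sketch of the intended usage (illustrative only: my_rmap_one() and
walk_example() are hypothetical names; the real conversion is the
remove_migration_ptes() change in the diff below):

	/*
	 * Illustrative sketch, not part of this patch; assumes
	 * <linux/rmap.h> and <linux/mm.h> in a kernel build context.
	 */
	static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
			       unsigned long addr, void *arg)
	{
		/* per-mapping work goes here; SWAP_AGAIN continues the walk */
		return SWAP_AGAIN;
	}

	static void walk_example(struct page *page)	/* page must be locked */
	{
		struct rmap_walk_control rwc = {
			.rmap_one = my_rmap_one,
			.arg = NULL,	/* handed back to the callback as 'arg' */
		};

		rmap_walk(page, &rwc);
	}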

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 5 changed files with 27 additions and 21 deletions

include/linux/ksm.h
... ... @@ -76,8 +76,7 @@
76 76 int page_referenced_ksm(struct page *page,
77 77 struct mem_cgroup *memcg, unsigned long *vm_flags);
78 78 int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
79   -int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
80   - struct vm_area_struct *, unsigned long, void *), void *arg);
  79 +int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
81 80 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
82 81  
83 82 #else /* !CONFIG_KSM */
... ... @@ -120,8 +119,8 @@
120 119 return 0;
121 120 }
122 121  
123   -static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
124   - struct vm_area_struct *, unsigned long, void *), void *arg)
  122 +static inline int rmap_walk_ksm(struct page *page,
  123 + struct rmap_walk_control *rwc)
125 124 {
126 125 return 0;
127 126 }
include/linux/rmap.h
... ... @@ -235,11 +235,16 @@
235 235 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
236 236 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
237 237  
  238 +struct rmap_walk_control {
  239 + void *arg;
  240 + int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
  241 + unsigned long addr, void *arg);
  242 +};
  243 +
238 244 /*
239 245 * Called by migrate.c to remove migration ptes, but might be used more later.
240 246 */
241   -int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
242   - struct vm_area_struct *, unsigned long, void *), void *arg);
  247 +int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
243 248  
244 249 #else /* !CONFIG_MMU */
245 250  
mm/ksm.c
... ... @@ -1997,8 +1997,7 @@
1997 1997 }
1998 1998  
1999 1999 #ifdef CONFIG_MIGRATION
2000   -int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
2001   - struct vm_area_struct *, unsigned long, void *), void *arg)
  2000 +int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
2002 2001 {
2003 2002 struct stable_node *stable_node;
2004 2003 struct rmap_item *rmap_item;
... ... @@ -2033,7 +2032,8 @@
2033 2032 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
2034 2033 continue;
2035 2034  
2036   - ret = rmap_one(page, vma, rmap_item->address, arg);
  2035 + ret = rwc->rmap_one(page, vma,
  2036 + rmap_item->address, rwc->arg);
2037 2037 if (ret != SWAP_AGAIN) {
2038 2038 anon_vma_unlock_read(anon_vma);
2039 2039 goto out;
mm/migrate.c
... ... @@ -199,7 +199,12 @@
199 199 */
200 200 static void remove_migration_ptes(struct page *old, struct page *new)
201 201 {
202   - rmap_walk(new, remove_migration_pte, old);
  202 + struct rmap_walk_control rwc = {
  203 + .rmap_one = remove_migration_pte,
  204 + .arg = old,
  205 + };
  206 +
  207 + rmap_walk(new, &rwc);
203 208 }
204 209  
205 210 /*
mm/rmap.c
... ... @@ -1706,8 +1706,7 @@
1706 1706 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
1707 1707 * Called by migrate.c to remove migration ptes, but might be used more later.
1708 1708 */
1709   -static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
1710   - struct vm_area_struct *, unsigned long, void *), void *arg)
  1709 +static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1711 1710 {
1712 1711 struct anon_vma *anon_vma;
1713 1712 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
... ... @@ -1721,7 +1720,7 @@
1721 1720 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1722 1721 struct vm_area_struct *vma = avc->vma;
1723 1722 unsigned long address = vma_address(page, vma);
1724   - ret = rmap_one(page, vma, address, arg);
  1723 + ret = rwc->rmap_one(page, vma, address, rwc->arg);
1725 1724 if (ret != SWAP_AGAIN)
1726 1725 break;
1727 1726 }
... ... @@ -1729,8 +1728,7 @@
1729 1728 return ret;
1730 1729 }
1731 1730  
1732   -static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
1733   - struct vm_area_struct *, unsigned long, void *), void *arg)
  1731 +static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
1734 1732 {
1735 1733 struct address_space *mapping = page->mapping;
1736 1734 pgoff_t pgoff = page->index << compound_order(page);
... ... @@ -1742,7 +1740,7 @@
1742 1740 mutex_lock(&mapping->i_mmap_mutex);
1743 1741 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1744 1742 unsigned long address = vma_address(page, vma);
1745   - ret = rmap_one(page, vma, address, arg);
  1743 + ret = rwc->rmap_one(page, vma, address, rwc->arg);
1746 1744 if (ret != SWAP_AGAIN)
1747 1745 break;
1748 1746 }
1749 1747  
... ... @@ -1755,17 +1753,16 @@
1755 1753 return ret;
1756 1754 }
1757 1755  
1758   -int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
1759   - struct vm_area_struct *, unsigned long, void *), void *arg)
  1756 +int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1760 1757 {
1761 1758 VM_BUG_ON(!PageLocked(page));
1762 1759  
1763 1760 if (unlikely(PageKsm(page)))
1764   - return rmap_walk_ksm(page, rmap_one, arg);
  1761 + return rmap_walk_ksm(page, rwc);
1765 1762 else if (PageAnon(page))
1766   - return rmap_walk_anon(page, rmap_one, arg);
  1763 + return rmap_walk_anon(page, rwc);
1767 1764 else
1768   - return rmap_walk_file(page, rmap_one, arg);
  1765 + return rmap_walk_file(page, rwc);
1769 1766 }
1770 1767 #endif /* CONFIG_MIGRATION */
1771 1768