Commit 25ef0e50cca790370ad7838e3ad74db6a6a2d829

Authored by Johannes Weiner
Committed by Linus Torvalds
1 parent f488401076

mincore: pass ranges as start,end address pairs

Instead of passing a start address and a number of pages into the helper
functions, convert them to use a start and an end address.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 27 additions and 30 deletions
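In short, the helpers switch from an (addr, nr) calling convention to an (addr, end) one. A minimal sketch of the before/after prototypes (the name "helper" is illustrative, not from the patch):

    /* Before: callers pass a page count; the helper indexes vec with it. */
    static void helper(struct vm_area_struct *vma,
                       unsigned long addr, unsigned long nr,
                       unsigned char *vec);

    /* After: callers pass an exclusive end address; a page count, where
     * still needed, is recovered as (end - addr) >> PAGE_SHIFT. */
    static void helper(struct vm_area_struct *vma,
                       unsigned long addr, unsigned long end,
                       unsigned char *vec);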

... ... @@ -20,14 +20,12 @@
20 20 #include <asm/pgtable.h>
21 21  
22 22 static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
23   - unsigned long addr, unsigned long nr,
  23 + unsigned long addr, unsigned long end,
24 24 unsigned char *vec)
25 25 {
26 26 #ifdef CONFIG_HUGETLB_PAGE
27 27 struct hstate *h;
28   - int i;
29 28  
30   - i = 0;
31 29 h = hstate_vma(vma);
32 30 while (1) {
33 31 unsigned char present;
34 32  
... ... @@ -40,10 +38,10 @@
40 38 addr & huge_page_mask(h));
41 39 present = ptep && !huge_pte_none(huge_ptep_get(ptep));
42 40 while (1) {
43   - vec[i++] = present;
  41 + *vec = present;
  42 + vec++;
44 43 addr += PAGE_SIZE;
45   - /* reach buffer limit */
46   - if (i == nr)
  44 + if (addr == end)
47 45 return;
48 46 /* check hugepage border */
49 47 if (!(addr & ~huge_page_mask(h)))
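With an end address available, the hugetlb walker drops the index i and instead advances the output pointer, stopping when addr reaches end. A condensed sketch of the resulting inner loop (the computation of present is elided):

    while (1) {
        *vec++ = present;                   /* one byte per PAGE_SIZE step */
        addr += PAGE_SIZE;
        if (addr == end)                    /* exclusive end replaces i == nr */
            return;
        if (!(addr & ~huge_page_mask(h)))   /* crossed a hugepage boundary */
            break;
    }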
... ... @@ -86,9 +84,10 @@
86 84 }
87 85  
88 86 static void mincore_unmapped_range(struct vm_area_struct *vma,
89   - unsigned long addr, unsigned long nr,
  87 + unsigned long addr, unsigned long end,
90 88 unsigned char *vec)
91 89 {
  90 + unsigned long nr = (end - addr) >> PAGE_SHIFT;
92 91 int i;
93 92  
94 93 if (vma->vm_file) {
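mincore_unmapped_range() still fills vec by index in its body (the int i it keeps is used below this hunk), so it now derives the page count locally instead of receiving it. Worked out, the conversion is just:

    /* Recover the page count from the address range; addr and end are
     * page aligned, so e.g. a 5-page span gives nr == 5. */
    unsigned long nr = (end - addr) >> PAGE_SHIFT;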
... ... @@ -104,42 +103,44 @@
104 103 }
105 104  
106 105 static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
107   - unsigned long addr, unsigned long nr,
  106 + unsigned long addr, unsigned long end,
108 107 unsigned char *vec)
109 108 {
  109 + unsigned long next;
110 110 spinlock_t *ptl;
111 111 pte_t *ptep;
112   - int i;
113 112  
114 113 ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
115   - for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE) {
  114 + do {
116 115 pte_t pte = *ptep;
117 116 pgoff_t pgoff;
118 117  
  118 + next = addr + PAGE_SIZE;
119 119 if (pte_none(pte))
120   - mincore_unmapped_range(vma, addr, 1, vec);
  120 + mincore_unmapped_range(vma, addr, next, vec);
121 121 else if (pte_present(pte))
122   - vec[i] = 1;
  122 + *vec = 1;
123 123 else if (pte_file(pte)) {
124 124 pgoff = pte_to_pgoff(pte);
125   - vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
  125 + *vec = mincore_page(vma->vm_file->f_mapping, pgoff);
126 126 } else { /* pte is a swap entry */
127 127 swp_entry_t entry = pte_to_swp_entry(pte);
128 128  
129 129 if (is_migration_entry(entry)) {
130 130 /* migration entries are always uptodate */
131   - vec[i] = 1;
  131 + *vec = 1;
132 132 } else {
133 133 #ifdef CONFIG_SWAP
134 134 pgoff = entry.val;
135   - vec[i] = mincore_page(&swapper_space, pgoff);
  135 + *vec = mincore_page(&swapper_space, pgoff);
136 136 #else
137 137 WARN_ON(1);
138   - vec[i] = 1;
  138 + *vec = 1;
139 139 #endif
140 140 }
141 141 }
142   - }
  142 + vec++;
  143 + } while (ptep++, addr = next, addr != end);
143 144 pte_unmap_unlock(ptep - 1, ptl);
144 145 }
145 146  
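The PTE walker is restructured into the kernel's usual page-table-walk shape: compute next = addr + PAGE_SIZE up front, hand [addr, next) to the unmapped helper for holes, and loop until addr reaches end. A stripped-down sketch of the control flow (the per-PTE classification is elided; present_byte stands in for whichever value the real cases compute):

    ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    do {
        next = addr + PAGE_SIZE;
        /* ... examine *ptep, or call
         * mincore_unmapped_range(vma, addr, next, vec) for a pte_none()
         * hole, and compute present_byte ... */
        *vec = present_byte;
        vec++;
    } while (ptep++, addr = next, addr != end);
    pte_unmap_unlock(ptep - 1, ptl);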
... ... @@ -153,25 +154,21 @@
153 154 pgd_t *pgd;
154 155 pud_t *pud;
155 156 pmd_t *pmd;
156   - unsigned long nr;
157 157 struct vm_area_struct *vma;
  158 + unsigned long end;
158 159  
159 160 vma = find_vma(current->mm, addr);
160 161 if (!vma || addr < vma->vm_start)
161 162 return -ENOMEM;
162 163  
163   - nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
  164 + end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
164 165  
165 166 if (is_vm_hugetlb_page(vma)) {
166   - mincore_hugetlb_page_range(vma, addr, nr, vec);
167   - return nr;
  167 + mincore_hugetlb_page_range(vma, addr, end, vec);
  168 + return (end - addr) >> PAGE_SHIFT;
168 169 }
169 170  
170   - /*
171   - * Calculate how many pages there are left in the last level of the
172   - * PTE array for our address.
173   - */
174   - nr = min(nr, PTRS_PER_PTE - ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE-1)));
  171 + end = pmd_addr_end(addr, end);
175 172  
176 173 pgd = pgd_offset(vma->vm_mm, addr);
177 174 if (pgd_none_or_clear_bad(pgd))
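pmd_addr_end() clamps end to the next PMD boundary, which is what the removed PTRS_PER_PTE arithmetic computed by hand, so one pte_offset_map_lock() still covers the whole range. The generic definition reads roughly as follows (per-arch overrides ignored):

    #define pmd_addr_end(addr, end)                                      \
    ({  unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;       \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);               \
    })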
... ... @@ -183,12 +180,12 @@
183 180 if (pmd_none_or_clear_bad(pmd))
184 181 goto none_mapped;
185 182  
186   - mincore_pte_range(vma, pmd, addr, nr, vec);
187   - return nr;
  183 + mincore_pte_range(vma, pmd, addr, end, vec);
  184 + return (end - addr) >> PAGE_SHIFT;
188 185  
189 186 none_mapped:
190   - mincore_unmapped_range(vma, addr, nr, vec);
191   - return nr;
  187 + mincore_unmapped_range(vma, addr, end, vec);
  188 + return (end - addr) >> PAGE_SHIFT;
192 189 }
193 190  
194 191 /*
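Each return path in do_mincore() now reports (end - addr) >> PAGE_SHIFT, i.e. how many bytes of vec were filled in this pass, so the caller can step addr and vec forward by the same amount. A hedged sketch of such a consumption loop, assuming do_mincore(addr, pages, vec) returns a page count or a negative errno (variable names are illustrative, not the kernel's, and the real syscall copies through a temporary buffer):

    /* Walk the whole request one do_mincore() chunk at a time. */
    while (pages) {
        long done = do_mincore(addr, pages, vec);
        if (done <= 0)
            break;                       /* e.g. -ENOMEM: addr not in any vma */
        addr  += done << PAGE_SHIFT;     /* advance by the pages handled */
        vec   += done;
        pages -= done;
    }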