Commit ba470de43188cdbff795b5da43a1474523c6c2fb

Authored by Rik van Riel
Committed by Linus Torvalds
1 parent 8edb08caf6

mmap: handle mlocked pages during map, remap, unmap

Originally by Nick Piggin <npiggin@suse.de>

Remove mlocked pages from the LRU using the "unevictable infrastructure"
during mmap(), munmap(), mremap() and truncate().  Try to move pages back
to the normal LRU lists on munmap() when the last mlocked mapping is
removed.  Remove the PageMlocked() status when a page is truncated from a file.

[akpm@linux-foundation.org: cleanup]
[kamezawa.hiroyu@jp.fujitsu.com: fix double unlock_page()]
[kosaki.motohiro@jp.fujitsu.com: split LRU: munlock rework]
[lee.schermerhorn@hp.com: mlock: fix __mlock_vma_pages_range comment block]
[akpm@linux-foundation.org: remove bogus kerneldoc token]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 6 changed files with 180 additions and 160 deletions

... ... @@ -21,6 +21,8 @@
21 21 #include <asm/cacheflush.h>
22 22 #include <asm/tlbflush.h>
23 23  
  24 +#include "internal.h"
  25 +
24 26 static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
25 27 unsigned long addr, pte_t *ptep)
26 28 {
... ... @@ -215,15 +217,31 @@
215 217 spin_unlock(&mapping->i_mmap_lock);
216 218 }
217 219  
  220 + if (vma->vm_flags & VM_LOCKED) {
  221 + /*
  222 + * drop PG_Mlocked flag for over-mapped range
  223 + */
  224 + unsigned int saved_flags = vma->vm_flags;
  225 + munlock_vma_pages_range(vma, start, start + size);
  226 + vma->vm_flags = saved_flags;
  227 + }
  228 +
218 229 mmu_notifier_invalidate_range_start(mm, start, start + size);
219 230 err = populate_range(mm, vma, start, size, pgoff);
220 231 mmu_notifier_invalidate_range_end(mm, start, start + size);
221 232 if (!err && !(flags & MAP_NONBLOCK)) {
222   - if (unlikely(has_write_lock)) {
223   - downgrade_write(&mm->mmap_sem);
224   - has_write_lock = 0;
  233 + if (vma->vm_flags & VM_LOCKED) {
  234 + /*
  235 + * might be mapping previously unmapped range of file
  236 + */
  237 + mlock_vma_pages_range(vma, start, start + size);
  238 + } else {
  239 + if (unlikely(has_write_lock)) {
  240 + downgrade_write(&mm->mmap_sem);
  241 + has_write_lock = 0;
  242 + }
  243 + make_pages_present(start, start+size);
225 244 }
226   - make_pages_present(start, start+size);
227 245 }
228 246  
229 247 /*
... ... @@ -61,9 +61,14 @@
61 61 return page_private(page);
62 62 }
63 63  
64   -extern int mlock_vma_pages_range(struct vm_area_struct *vma,
  64 +extern long mlock_vma_pages_range(struct vm_area_struct *vma,
65 65 unsigned long start, unsigned long end);
66   -extern void munlock_vma_pages_all(struct vm_area_struct *vma);
  66 +extern void munlock_vma_pages_range(struct vm_area_struct *vma,
  67 + unsigned long start, unsigned long end);
  68 +static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
  69 +{
  70 + munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
  71 +}
67 72  
68 73 #ifdef CONFIG_UNEVICTABLE_LRU
69 74 /*
... ... @@ -112,27 +112,50 @@
112 112 }
113 113 }
114 114  
115   -/*
116   - * mlock a range of pages in the vma.
  115 +/**
  116 + * __mlock_vma_pages_range() - mlock/munlock a range of pages in the vma.
  117 + * @vma: target vma
  118 + * @start: start address
  119 + * @end: end address
  120 + * @mlock: 0 indicates munlock, otherwise mlock.
117 121 *
118   - * This takes care of making the pages present too.
  122 + * If @mlock == 0, unlock an mlocked range;
  123 + * else mlock the range of pages. This takes care of making the pages present,
  124 + * too.
119 125 *
120   - * vma->vm_mm->mmap_sem must be held for write.
  126 + * return 0 on success, negative error code on error.
  127 + *
  128 + * vma->vm_mm->mmap_sem must be held for at least read.
121 129 */
122   -static int __mlock_vma_pages_range(struct vm_area_struct *vma,
123   - unsigned long start, unsigned long end)
  130 +static long __mlock_vma_pages_range(struct vm_area_struct *vma,
  131 + unsigned long start, unsigned long end,
  132 + int mlock)
124 133 {
125 134 struct mm_struct *mm = vma->vm_mm;
126 135 unsigned long addr = start;
127 136 struct page *pages[16]; /* 16 gives a reasonable batch */
128   - int write = !!(vma->vm_flags & VM_WRITE);
129 137 int nr_pages = (end - start) / PAGE_SIZE;
130 138 int ret;
  139 + int gup_flags = 0;
131 140  
132   - VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
133   - VM_BUG_ON(start < vma->vm_start || end > vma->vm_end);
134   - VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
  141 + VM_BUG_ON(start & ~PAGE_MASK);
  142 + VM_BUG_ON(end & ~PAGE_MASK);
  143 + VM_BUG_ON(start < vma->vm_start);
  144 + VM_BUG_ON(end > vma->vm_end);
  145 + VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
  146 + (atomic_read(&mm->mm_users) != 0));
135 147  
  148 + /*
  149 + * mlock: don't populate pages if the page has PROT_NONE permission.
  150 + * munlock: always munlock the pages, even though
  151 + * they have PROT_NONE permission.
  152 + */
  153 + if (!mlock)
  154 + gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS;
  155 +
  156 + if (vma->vm_flags & VM_WRITE)
  157 + gup_flags |= GUP_FLAGS_WRITE;
  158 +
136 159 lru_add_drain_all(); /* push cached pages to LRU */
137 160  
138 161 while (nr_pages > 0) {
... ... @@ -146,9 +169,9 @@
146 169 * disable migration of this page. However, page may
147 170 * still be truncated out from under us.
148 171 */
149   - ret = get_user_pages(current, mm, addr,
  172 + ret = __get_user_pages(current, mm, addr,
150 173 min_t(int, nr_pages, ARRAY_SIZE(pages)),
151   - write, 0, pages, NULL);
  174 + gup_flags, pages, NULL);
152 175 /*
153 176 * This can happen for, e.g., VM_NONLINEAR regions before
154 177 * a page has been allocated and mapped at a given offset,
... ... @@ -178,8 +201,12 @@
178 201 * by the elevated reference, we need only check for
179 202 * page truncation (file-cache only).
180 203 */
181   - if (page->mapping)
182   - mlock_vma_page(page);
  204 + if (page->mapping) {
  205 + if (mlock)
  206 + mlock_vma_page(page);
  207 + else
  208 + munlock_vma_page(page);
  209 + }
183 210 unlock_page(page);
184 211 put_page(page); /* ref from get_user_pages() */
185 212  
... ... @@ -197,125 +224,38 @@
197 224 return 0; /* count entire vma as locked_vm */
198 225 }
199 226  
200   -/*
201   - * private structure for munlock page table walk
202   - */
203   -struct munlock_page_walk {
204   - struct vm_area_struct *vma;
205   - pmd_t *pmd; /* for migration_entry_wait() */
206   -};
207   -
208   -/*
209   - * munlock normal pages for present ptes
210   - */
211   -static int __munlock_pte_handler(pte_t *ptep, unsigned long addr,
212   - unsigned long end, struct mm_walk *walk)
213   -{
214   - struct munlock_page_walk *mpw = walk->private;
215   - swp_entry_t entry;
216   - struct page *page;
217   - pte_t pte;
218   -
219   -retry:
220   - pte = *ptep;
221   - /*
222   - * If it's a swap pte, we might be racing with page migration.
223   - */
224   - if (unlikely(!pte_present(pte))) {
225   - if (!is_swap_pte(pte))
226   - goto out;
227   - entry = pte_to_swp_entry(pte);
228   - if (is_migration_entry(entry)) {
229   - migration_entry_wait(mpw->vma->vm_mm, mpw->pmd, addr);
230   - goto retry;
231   - }
232   - goto out;
233   - }
234   -
235   - page = vm_normal_page(mpw->vma, addr, pte);
236   - if (!page)
237   - goto out;
238   -
239   - lock_page(page);
240   - if (!page->mapping) {
241   - unlock_page(page);
242   - goto retry;
243   - }
244   - munlock_vma_page(page);
245   - unlock_page(page);
246   -
247   -out:
248   - return 0;
249   -}
250   -
251   -/*
252   - * Save pmd for pte handler for waiting on migration entries
253   - */
254   -static int __munlock_pmd_handler(pmd_t *pmd, unsigned long addr,
255   - unsigned long end, struct mm_walk *walk)
256   -{
257   - struct munlock_page_walk *mpw = walk->private;
258   -
259   - mpw->pmd = pmd;
260   - return 0;
261   -}
262   -
263   -
264   -/*
265   - * munlock a range of pages in the vma using standard page table walk.
266   - *
267   - * vma->vm_mm->mmap_sem must be held for write.
268   - */
269   -static void __munlock_vma_pages_range(struct vm_area_struct *vma,
270   - unsigned long start, unsigned long end)
271   -{
272   - struct mm_struct *mm = vma->vm_mm;
273   - struct munlock_page_walk mpw = {
274   - .vma = vma,
275   - };
276   - struct mm_walk munlock_page_walk = {
277   - .pmd_entry = __munlock_pmd_handler,
278   - .pte_entry = __munlock_pte_handler,
279   - .private = &mpw,
280   - .mm = mm,
281   - };
282   -
283   - VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
284   - VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
285   - VM_BUG_ON(start < vma->vm_start);
286   - VM_BUG_ON(end > vma->vm_end);
287   -
288   - lru_add_drain_all(); /* push cached pages to LRU */
289   - walk_page_range(start, end, &munlock_page_walk);
290   - lru_add_drain_all(); /* to update stats */
291   -}
292   -
293 227 #else /* CONFIG_UNEVICTABLE_LRU */
294 228  
295 229 /*
296 230 * Just make pages present if VM_LOCKED. No-op if unlocking.
297 231 */
298   -static int __mlock_vma_pages_range(struct vm_area_struct *vma,
299   - unsigned long start, unsigned long end)
  232 +static long __mlock_vma_pages_range(struct vm_area_struct *vma,
  233 + unsigned long start, unsigned long end,
  234 + int mlock)
300 235 {
301   - if (vma->vm_flags & VM_LOCKED)
  236 + if (mlock && (vma->vm_flags & VM_LOCKED))
302 237 make_pages_present(start, end);
303 238 return 0;
304 239 }
305   -
306   -/*
307   - * munlock a range of pages in the vma -- no-op.
308   - */
309   -static void __munlock_vma_pages_range(struct vm_area_struct *vma,
310   - unsigned long start, unsigned long end)
311   -{
312   -}
313 240 #endif /* CONFIG_UNEVICTABLE_LRU */
314 241  
315   -/*
316   - * mlock all pages in this vma range. For mmap()/mremap()/...
  242 +/**
  243 + * mlock_vma_pages_range() - mlock pages in specified vma range.
  244 + * @vma - the vma containing the specified address range
  245 + * @start - starting address in @vma to mlock
  246 + * @end - end address [+1] in @vma to mlock
  247 + *
  248 + * For mmap()/mremap()/expansion of mlocked vma.
  249 + *
  250 + * return 0 on success for "normal" vmas.
  251 + *
  252 + * return number of pages [> 0] to be removed from locked_vm on success
  253 + * of "special" vmas.
  254 + *
  255 + * return negative error if vma spanning @start-@end disappears while
  256 + * mmap semaphore is dropped. Unlikely?
317 257 */
318   -int mlock_vma_pages_range(struct vm_area_struct *vma,
  258 +long mlock_vma_pages_range(struct vm_area_struct *vma,
319 259 unsigned long start, unsigned long end)
320 260 {
321 261 struct mm_struct *mm = vma->vm_mm;
... ... @@ -331,17 +271,20 @@
331 271 if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
332 272 is_vm_hugetlb_page(vma) ||
333 273 vma == get_gate_vma(current))) {
  274 + long error;
334 275 downgrade_write(&mm->mmap_sem);
335   - nr_pages = __mlock_vma_pages_range(vma, start, end);
336 276  
  277 + error = __mlock_vma_pages_range(vma, start, end, 1);
  278 +
337 279 up_read(&mm->mmap_sem);
338 280 /* vma can change or disappear */
339 281 down_write(&mm->mmap_sem);
340 282 vma = find_vma(mm, start);
341 283 /* non-NULL vma must contain @start, but need to check @end */
342 284 if (!vma || end > vma->vm_end)
343   - return -EAGAIN;
344   - return nr_pages;
  285 + return -ENOMEM;
  286 +
  287 + return 0; /* hide other errors from mmap(), et al */
345 288 }
346 289  
347 290 /*
... ... @@ -356,17 +299,33 @@
356 299  
357 300 no_mlock:
358 301 vma->vm_flags &= ~VM_LOCKED; /* and don't come back! */
359   - return nr_pages; /* pages NOT mlocked */
  302 + return nr_pages; /* error or pages NOT mlocked */
360 303 }
361 304  
362 305  
363 306 /*
364   - * munlock all pages in vma. For munmap() and exit().
  307 + * munlock_vma_pages_range() - munlock all pages in the vma range.
  308 + * @vma - vma containing range to be munlock()ed.
  309 + * @start - start address in @vma of the range
  310 + * @end - end of range in @vma.
  311 + *
  312 + * For mremap(), munmap() and exit().
  313 + *
  314 + * Called with @vma VM_LOCKED.
  315 + *
  316 + * Returns with VM_LOCKED cleared. Callers must be prepared to
  317 + * deal with this.
  318 + *
  319 + * We don't save and restore VM_LOCKED here because pages are
  320 + * still on lru. In unmap path, pages might be scanned by reclaim
  321 + * and re-mlocked by try_to_{munlock|unmap} before we unmap and
  322 + * free them. This will result in freeing mlocked pages.
365 323 */
366   -void munlock_vma_pages_all(struct vm_area_struct *vma)
  324 +void munlock_vma_pages_range(struct vm_area_struct *vma,
  325 + unsigned long start, unsigned long end)
367 326 {
368 327 vma->vm_flags &= ~VM_LOCKED;
369   - __munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
  328 + __mlock_vma_pages_range(vma, start, end, 0);
370 329 }
371 330  
372 331 /*
... ... @@ -443,7 +402,7 @@
443 402 */
444 403 downgrade_write(&mm->mmap_sem);
445 404  
446   - ret = __mlock_vma_pages_range(vma, start, end);
  405 + ret = __mlock_vma_pages_range(vma, start, end, 1);
447 406 if (ret > 0) {
448 407 mm->locked_vm -= ret;
449 408 ret = 0;
... ... @@ -460,7 +419,7 @@
460 419 *prev = find_vma(mm, start);
461 420 /* non-NULL *prev must contain @start, but need to check @end */
462 421 if (!(*prev) || end > (*prev)->vm_end)
463   - ret = -EAGAIN;
  422 + ret = -ENOMEM;
464 423 } else {
465 424 /*
466 425 * TODO: for unlocking, pages will already be resident, so
... ... @@ -469,7 +428,7 @@
469 428 * while. Should we downgrade the semaphore for both lock
470 429 * AND unlock ?
471 430 */
472   - __munlock_vma_pages_range(vma, start, end);
  431 + __mlock_vma_pages_range(vma, start, end, 0);
473 432 }
474 433  
475 434 out:
... ... @@ -970,6 +970,7 @@
970 970 return -EPERM;
971 971 vm_flags |= VM_LOCKED;
972 972 }
  973 +
973 974 /* mlock MCL_FUTURE? */
974 975 if (vm_flags & VM_LOCKED) {
975 976 unsigned long locked, lock_limit;
... ... @@ -1137,10 +1138,12 @@
1137 1138 * The VM_SHARED test is necessary because shmem_zero_setup
1138 1139 * will create the file object for a shared anonymous map below.
1139 1140 */
1140   - if (!file && !(vm_flags & VM_SHARED) &&
1141   - vma_merge(mm, prev, addr, addr + len, vm_flags,
1142   - NULL, NULL, pgoff, NULL))
1143   - goto out;
  1141 + if (!file && !(vm_flags & VM_SHARED)) {
  1142 + vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
  1143 + NULL, NULL, pgoff, NULL);
  1144 + if (vma)
  1145 + goto out;
  1146 + }
1144 1147  
1145 1148 /*
1146 1149 * Determine the object being mapped and call the appropriate
... ... @@ -1222,11 +1225,15 @@
1222 1225 mm->total_vm += len >> PAGE_SHIFT;
1223 1226 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1224 1227 if (vm_flags & VM_LOCKED) {
1225   - mm->locked_vm += len >> PAGE_SHIFT;
  1228 + /*
  1229 + * makes pages present; downgrades, drops, reacquires mmap_sem
  1230 + */
  1231 + long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
  1232 + if (nr_pages < 0)
  1233 + return nr_pages; /* vma gone! */
  1234 + mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
  1235 + } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
1226 1236 make_pages_present(addr, addr + len);
1227   - }
1228   - if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
1229   - make_pages_present(addr, addr + len);
1230 1237 return addr;
1231 1238  
1232 1239 unmap_and_free_vma:
... ... @@ -1698,8 +1705,10 @@
1698 1705 return vma;
1699 1706 if (!prev || expand_stack(prev, addr))
1700 1707 return NULL;
1701   - if (prev->vm_flags & VM_LOCKED)
1702   - make_pages_present(addr, prev->vm_end);
  1708 + if (prev->vm_flags & VM_LOCKED) {
  1709 + if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
  1710 + return NULL; /* vma gone! */
  1711 + }
1703 1712 return prev;
1704 1713 }
1705 1714 #else
... ... @@ -1725,8 +1734,10 @@
1725 1734 start = vma->vm_start;
1726 1735 if (expand_stack(vma, addr))
1727 1736 return NULL;
1728   - if (vma->vm_flags & VM_LOCKED)
1729   - make_pages_present(addr, start);
  1737 + if (vma->vm_flags & VM_LOCKED) {
  1738 + if (mlock_vma_pages_range(vma, addr, start) < 0)
  1739 + return NULL; /* vma gone! */
  1740 + }
1730 1741 return vma;
1731 1742 }
1732 1743 #endif
... ... @@ -1745,8 +1756,6 @@
1745 1756 long nrpages = vma_pages(vma);
1746 1757  
1747 1758 mm->total_vm -= nrpages;
1748   - if (vma->vm_flags & VM_LOCKED)
1749   - mm->locked_vm -= nrpages;
1750 1759 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
1751 1760 vma = remove_vma(vma);
1752 1761 } while (vma);
... ... @@ -1912,6 +1921,20 @@
1912 1921 vma = prev? prev->vm_next: mm->mmap;
1913 1922  
1914 1923 /*
  1924 + * unlock any mlock()ed ranges before detaching vmas
  1925 + */
  1926 + if (mm->locked_vm) {
  1927 + struct vm_area_struct *tmp = vma;
  1928 + while (tmp && tmp->vm_start < end) {
  1929 + if (tmp->vm_flags & VM_LOCKED) {
  1930 + mm->locked_vm -= vma_pages(tmp);
  1931 + munlock_vma_pages_all(tmp);
  1932 + }
  1933 + tmp = tmp->vm_next;
  1934 + }
  1935 + }
  1936 +
  1937 + /*
1915 1938 * Remove the vma's, and unmap the actual pages
1916 1939 */
1917 1940 detach_vmas_to_be_unmapped(mm, vma, prev, end);
... ... @@ -2023,8 +2046,9 @@
2023 2046 return -ENOMEM;
2024 2047  
2025 2048 /* Can we just expand an old private anonymous mapping? */
2026   - if (vma_merge(mm, prev, addr, addr + len, flags,
2027   - NULL, NULL, pgoff, NULL))
  2049 + vma = vma_merge(mm, prev, addr, addr + len, flags,
  2050 + NULL, NULL, pgoff, NULL);
  2051 + if (vma)
2028 2052 goto out;
2029 2053  
2030 2054 /*
... ... @@ -2046,8 +2070,8 @@
2046 2070 out:
2047 2071 mm->total_vm += len >> PAGE_SHIFT;
2048 2072 if (flags & VM_LOCKED) {
2049   - mm->locked_vm += len >> PAGE_SHIFT;
2050   - make_pages_present(addr, addr + len);
  2073 + if (!mlock_vma_pages_range(vma, addr, addr + len))
  2074 + mm->locked_vm += (len >> PAGE_SHIFT);
2051 2075 }
2052 2076 return addr;
2053 2077 }
... ... @@ -2058,7 +2082,7 @@
2058 2082 void exit_mmap(struct mm_struct *mm)
2059 2083 {
2060 2084 struct mmu_gather *tlb;
2061   - struct vm_area_struct *vma = mm->mmap;
  2085 + struct vm_area_struct *vma;
2062 2086 unsigned long nr_accounted = 0;
2063 2087 unsigned long end;
2064 2088  
... ... @@ -2066,6 +2090,15 @@
2066 2090 arch_exit_mmap(mm);
2067 2091 mmu_notifier_release(mm);
2068 2092  
  2093 + if (mm->locked_vm) {
  2094 + vma = mm->mmap;
  2095 + while (vma) {
  2096 + if (vma->vm_flags & VM_LOCKED)
  2097 + munlock_vma_pages_all(vma);
  2098 + vma = vma->vm_next;
  2099 + }
  2100 + }
  2101 + vma = mm->mmap;
2069 2102 lru_add_drain();
2070 2103 flush_cache_mm(mm);
2071 2104 tlb = tlb_gather_mmu(mm, 1);
... ... @@ -24,6 +24,8 @@
24 24 #include <asm/cacheflush.h>
25 25 #include <asm/tlbflush.h>
26 26  
  27 +#include "internal.h"
  28 +
27 29 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
28 30 {
29 31 pgd_t *pgd;
... ... @@ -238,8 +240,8 @@
238 240 if (vm_flags & VM_LOCKED) {
239 241 mm->locked_vm += new_len >> PAGE_SHIFT;
240 242 if (new_len > old_len)
241   - make_pages_present(new_addr + old_len,
242   - new_addr + new_len);
  243 + mlock_vma_pages_range(new_vma, new_addr + old_len,
  244 + new_addr + new_len);
243 245 }
244 246  
245 247 return new_addr;
... ... @@ -379,7 +381,7 @@
379 381 vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
380 382 if (vma->vm_flags & VM_LOCKED) {
381 383 mm->locked_vm += pages;
382   - make_pages_present(addr + old_len,
  384 + mlock_vma_pages_range(vma, addr + old_len,
383 385 addr + new_len);
384 386 }
385 387 ret = addr;
... ... @@ -18,6 +18,7 @@
18 18 #include <linux/task_io_accounting_ops.h>
19 19 #include <linux/buffer_head.h> /* grr. try_to_release_page,
20 20 do_invalidatepage */
  21 +#include "internal.h"
21 22  
22 23  
23 24 /**
... ... @@ -103,6 +104,7 @@
103 104  
104 105 cancel_dirty_page(page, PAGE_CACHE_SIZE);
105 106  
  107 + clear_page_mlock(page);
106 108 remove_from_page_cache(page);
107 109 ClearPageMappedToDisk(page);
108 110 page_cache_release(page); /* pagecache ref */
... ... @@ -127,6 +129,7 @@
127 129 if (PagePrivate(page) && !try_to_release_page(page, 0))
128 130 return 0;
129 131  
  132 + clear_page_mlock(page);
130 133 ret = remove_mapping(mapping, page);
131 134  
132 135 return ret;
... ... @@ -352,6 +355,7 @@
352 355 if (PageDirty(page))
353 356 goto failed;
354 357  
  358 + clear_page_mlock(page);
355 359 BUG_ON(PagePrivate(page));
356 360 __remove_from_page_cache(page);
357 361 spin_unlock_irq(&mapping->tree_lock);