Commit cea10a19b7972a1954c4a2d05a7de8db48b444fb

Authored by Michel Lespinasse
Committed by Linus Torvalds
1 parent c22c0d6344

mm: directly use __mlock_vma_pages_range() in find_extend_vma()

In find_extend_vma(), we don't need mlock_vma_pages_range() to verify
the vma type - we know we're working with a stack.  So, we can call
directly into __mlock_vma_pages_range(), and remove the last
make_pages_present() call site.
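
For reference, the vma-type checks the removed wrapper performed are condensed below into a hypothetical helper (not part of this change, just a restatement of the mm/mlock.c code deleted further down).  A growing stack vma is ordinary anonymous memory, so none of these cases can apply, which is why the wrapper is unnecessary here:

/*
 * Hypothetical helper, not in the tree: condenses the vma classes that
 * the removed mlock_vma_pages_range() wrapper treated specially instead
 * of mlocking normally.  A growing stack vma matches none of them.
 */
static bool vma_is_mlock_special(struct vm_area_struct *vma)
{
	return (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_DONTEXPAND)) ||
	       is_vm_hugetlb_page(vma) ||
	       vma == get_gate_vma(current->mm);
}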

Note that we don't use mm_populate() here, so we can't release the
mmap_sem while allocating new stack pages.  This is deemed acceptable,
because the stack vmas grow by a bounded number of pages at a time, and
these are anon pages so we don't have to read from disk to populate
them.
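
To make the locking point concrete, this is the shape of the new call site (the grows-down variant from the mm/mmap.c hunk below), restated here to highlight the NULL nonblocking argument: with nonblocking == NULL, __mlock_vma_pages_range() faults the pages in without ever dropping mmap_sem.

	/* After expand_stack() has grown the vma down to addr, fault in the
	 * newly mapped range (start holds the old vm_start).  Passing NULL
	 * for nonblocking keeps mmap_sem held for the duration; acceptable
	 * because the range is only a few freshly allocated anonymous pages
	 * and never requires disk I/O. */
	if (vma->vm_flags & VM_LOCKED)
		__mlock_vma_pages_range(vma, addr, start, NULL);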

Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Tested-by: Andy Lutomirski <luto@amacapital.net>
Cc: Greg Ungerer <gregungerer@westnet.com.au>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 5 changed files with 9 additions and 87 deletions

include/linux/mm.h
... ... @@ -1035,7 +1035,6 @@
1035 1035 }
1036 1036 #endif
1037 1037  
1038   -extern int make_pages_present(unsigned long addr, unsigned long end);
1039 1038 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
1040 1039 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1041 1040 void *buf, int len, int write);
mm/internal.h
... ... @@ -162,8 +162,8 @@
162 162 struct vm_area_struct *prev, struct rb_node *rb_parent);
163 163  
164 164 #ifdef CONFIG_MMU
165   -extern long mlock_vma_pages_range(struct vm_area_struct *vma,
166   - unsigned long start, unsigned long end);
  165 +extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
  166 + unsigned long start, unsigned long end, int *nonblocking);
167 167 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
168 168 unsigned long start, unsigned long end);
169 169 static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
mm/memory.c
... ... @@ -3824,30 +3824,6 @@
3824 3824 }
3825 3825 #endif /* __PAGETABLE_PMD_FOLDED */
3826 3826  
3827   -int make_pages_present(unsigned long addr, unsigned long end)
3828   -{
3829   - int ret, len, write;
3830   - struct vm_area_struct * vma;
3831   -
3832   - vma = find_vma(current->mm, addr);
3833   - if (!vma)
3834   - return -ENOMEM;
3835   - /*
3836   - * We want to touch writable mappings with a write fault in order
3837   - * to break COW, except for shared mappings because these don't COW
3838   - * and we would not want to dirty them for nothing.
3839   - */
3840   - write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
3841   - BUG_ON(addr >= end);
3842   - BUG_ON(end > vma->vm_end);
3843   - len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
3844   - ret = get_user_pages(current, current->mm, addr,
3845   - len, write, 0, NULL, NULL);
3846   - if (ret < 0)
3847   - return ret;
3848   - return ret == len ? 0 : -EFAULT;
3849   -}
3850   -
3851 3827 #if !defined(__HAVE_ARCH_GATE_AREA)
3852 3828  
3853 3829 #if defined(AT_SYSINFO_EHDR)
mm/mlock.c
... ... @@ -155,9 +155,8 @@
155 155 *
156 156 * vma->vm_mm->mmap_sem must be held for at least read.
157 157 */
158   -static long __mlock_vma_pages_range(struct vm_area_struct *vma,
159   - unsigned long start, unsigned long end,
160   - int *nonblocking)
  158 +long __mlock_vma_pages_range(struct vm_area_struct *vma,
  159 + unsigned long start, unsigned long end, int *nonblocking)
161 160 {
162 161 struct mm_struct *mm = vma->vm_mm;
163 162 unsigned long addr = start;
... ... @@ -202,56 +201,6 @@
202 201 return retval;
203 202 }
204 203  
205   -/**
206   - * mlock_vma_pages_range() - mlock pages in specified vma range.
207   - * @vma - the vma containing the specfied address range
208   - * @start - starting address in @vma to mlock
209   - * @end - end address [+1] in @vma to mlock
210   - *
211   - * For mmap()/mremap()/expansion of mlocked vma.
212   - *
213   - * return 0 on success for "normal" vmas.
214   - *
215   - * return number of pages [> 0] to be removed from locked_vm on success
216   - * of "special" vmas.
217   - */
218   -long mlock_vma_pages_range(struct vm_area_struct *vma,
219   - unsigned long start, unsigned long end)
220   -{
221   - int nr_pages = (end - start) / PAGE_SIZE;
222   - BUG_ON(!(vma->vm_flags & VM_LOCKED));
223   -
224   - /*
225   - * filter unlockable vmas
226   - */
227   - if (vma->vm_flags & (VM_IO | VM_PFNMAP))
228   - goto no_mlock;
229   -
230   - if (!((vma->vm_flags & VM_DONTEXPAND) ||
231   - is_vm_hugetlb_page(vma) ||
232   - vma == get_gate_vma(current->mm))) {
233   -
234   - __mlock_vma_pages_range(vma, start, end, NULL);
235   -
236   - /* Hide errors from mmap() and other callers */
237   - return 0;
238   - }
239   -
240   - /*
241   - * User mapped kernel pages or huge pages:
242   - * make these pages present to populate the ptes, but
243   - * fall thru' to reset VM_LOCKED--no need to unlock, and
244   - * return nr_pages so these don't get counted against task's
245   - * locked limit. huge pages are already counted against
246   - * locked vm limit.
247   - */
248   - make_pages_present(start, end);
249   -
250   -no_mlock:
251   - vma->vm_flags &= ~VM_LOCKED; /* and don't come back! */
252   - return nr_pages; /* error or pages NOT mlocked */
253   -}
254   -
255 204 /*
256 205 * munlock_vma_pages_range() - munlock all pages in the vma range.'
257 206 * @vma - vma containing range to be munlock()ed.
... ... @@ -303,7 +252,7 @@
303 252 *
304 253 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
305 254 * munlock is a no-op. However, for some special vmas, we go ahead and
306   - * populate the ptes via make_pages_present().
  255 + * populate the ptes.
307 256 *
308 257 * For vmas that pass the filters, merge/split as appropriate.
309 258 */
mm/mmap.c
... ... @@ -2204,9 +2204,8 @@
2204 2204 return vma;
2205 2205 if (!prev || expand_stack(prev, addr))
2206 2206 return NULL;
2207   - if (prev->vm_flags & VM_LOCKED) {
2208   - mlock_vma_pages_range(prev, addr, prev->vm_end);
2209   - }
  2207 + if (prev->vm_flags & VM_LOCKED)
  2208 + __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
2210 2209 return prev;
2211 2210 }
2212 2211 #else
... ... @@ -2232,9 +2231,8 @@
2232 2231 start = vma->vm_start;
2233 2232 if (expand_stack(vma, addr))
2234 2233 return NULL;
2235   - if (vma->vm_flags & VM_LOCKED) {
2236   - mlock_vma_pages_range(vma, addr, start);
2237   - }
  2234 + if (vma->vm_flags & VM_LOCKED)
  2235 + __mlock_vma_pages_range(vma, addr, start, NULL);
2238 2236 return vma;
2239 2237 }
2240 2238 #endif