Commit 28a35716d317980ae9bc2ff2f84c33a3cda9e884

Authored by Michel Lespinasse
Committed by Linus Torvalds
1 parent e0fb581529

mm: use long type for page counts in mm_populate() and get_user_pages()

Use long type for page counts in mm_populate() so as to avoid integer
overflow when running the following test code:

#include <stdio.h>
#include <sys/mman.h>

int main(void) {
  void *p = mmap(NULL, 0x100000000000, PROT_READ,
                 MAP_PRIVATE | MAP_ANON, -1, 0);
  printf("p: %p\n", p);
  mlockall(MCL_CURRENT);
  printf("done\n");
  return 0;
}
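
For scale: the mapping above spans 0x100000000000 bytes (2^44), i.e.
2^44 / 2^12 = 2^32 four-kilobyte pages, which does not fit in a signed
32-bit int. A minimal userspace sketch of the truncation (illustrative
only, assuming a typical LP64, two's-complement system):

#include <stdio.h>

int main(void) {
  unsigned long len = 0x100000000000UL; /* 2^44 bytes, as in the test above */
  unsigned long pages = len / 4096;     /* 2^32 pages */
  int as_int = (int)pages;              /* what an int-typed count would hold */

  /* typically prints: pages = 4294967296, as int = 0 */
  printf("pages = %lu, as int = %d\n", pages, as_int);
  return 0;
}

With the count wrapped to 0, the old int-based paths could bail out or
miscount under mlockall() instead of populating the whole range; long
counts avoid the wraparound.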

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 6 changed files with 36 additions and 34 deletions

include/linux/hugetlb.h
@@ -43,9 +43,9 @@
 #endif
 
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
-int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
-			struct page **, struct vm_area_struct **,
-			unsigned long *, int *, int, unsigned int flags);
+long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
+			 struct page **, struct vm_area_struct **,
+			 unsigned long *, unsigned long *, long, unsigned int);
 void unmap_hugepage_range(struct vm_area_struct *,
 			  unsigned long, unsigned long, struct page *);
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
include/linux/mm.h
@@ -1013,13 +1013,14 @@
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, int write);
 
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     int *nonblocking);
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		   unsigned long start, int nr_pages, int write, int force,
-		   struct page **pages, struct vm_area_struct **vmas);
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		      unsigned long start, unsigned long nr_pages,
+		      unsigned int foll_flags, struct page **pages,
+		      struct vm_area_struct **vmas, int *nonblocking);
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		    unsigned long start, unsigned long nr_pages,
+		    int write, int force, struct page **pages,
+		    struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 struct kvec;
mm/hugetlb.c
@@ -2920,14 +2920,14 @@
 	return NULL;
 }
 
-int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-			struct page **pages, struct vm_area_struct **vmas,
-			unsigned long *position, int *length, int i,
-			unsigned int flags)
+long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			 struct page **pages, struct vm_area_struct **vmas,
+			 unsigned long *position, unsigned long *nr_pages,
+			 long i, unsigned int flags)
 {
 	unsigned long pfn_offset;
 	unsigned long vaddr = *position;
-	int remainder = *length;
+	unsigned long remainder = *nr_pages;
 	struct hstate *h = hstate_vma(vma);
 
 	spin_lock(&mm->page_table_lock);
@@ -2997,7 +2997,7 @@
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
-	*length = remainder;
+	*nr_pages = remainder;
 	*position = vaddr;
 
 	return i ? i : -EFAULT;
mm/memory.c
@@ -1677,15 +1677,15 @@
  * instead of __get_user_pages. __get_user_pages should be used only if
  * you need some special @gup_flags.
  */
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int nr_pages, unsigned int gup_flags,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     int *nonblocking)
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		      unsigned long start, unsigned long nr_pages,
+		      unsigned int gup_flags, struct page **pages,
+		      struct vm_area_struct **vmas, int *nonblocking)
 {
-	int i;
+	long i;
 	unsigned long vm_flags;
 
-	if (nr_pages <= 0)
+	if (!nr_pages)
 		return 0;
 
 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
@@ -1981,9 +1981,9 @@
  *
  * See also get_user_pages_fast, for performance critical applications.
  */
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		   unsigned long start, int nr_pages, int write, int force,
-		   struct page **pages, struct vm_area_struct **vmas)
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		    unsigned long start, unsigned long nr_pages, int write,
+		    int force, struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = FOLL_TOUCH;
 
mm/mlock.c
@@ -160,7 +160,7 @@
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long addr = start;
-	int nr_pages = (end - start) / PAGE_SIZE;
+	unsigned long nr_pages = (end - start) / PAGE_SIZE;
 	int gup_flags;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
@@ -382,7 +382,7 @@
 	unsigned long end, nstart, nend;
 	struct vm_area_struct *vma = NULL;
 	int locked = 0;
-	int ret = 0;
+	long ret = 0;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
 	VM_BUG_ON(len != PAGE_ALIGN(len));
mm/nommu.c
@@ -140,10 +140,10 @@
 	return PAGE_SIZE << compound_order(page);
 }
 
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int nr_pages, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     int *retry)
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		      unsigned long start, unsigned long nr_pages,
+		      unsigned int foll_flags, struct page **pages,
+		      struct vm_area_struct **vmas, int *nonblocking)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -190,9 +190,10 @@
  * slab page or a secondary page from a compound page
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		   unsigned long start, int nr_pages, int write, int force,
-		   struct page **pages, struct vm_area_struct **vmas)
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		    unsigned long start, unsigned long nr_pages,
+		    int write, int force, struct page **pages,
+		    struct vm_area_struct **vmas)
 {
 	int flags = 0;
 