Commit 9de455b20705f36384a711d4a20bcf7ba1ab180b

Authored by Atsushi Nemoto
Committed by Linus Torvalds
1 parent 77fff4ae2b

[PATCH] Pass vma argument to copy_user_highpage().

To allow a more effective copy_user_highpage() on certain architectures,
a vma argument is added to the function and cow_user_page() allowing
the implementation of these functions to check for the VM_EXEC bit.

The main part of this patch was originally written by Ralf Baechle;
Atsushi Nemoto did the debugging.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 3 changed files with 10 additions and 9 deletions Side-by-side Diff

include/linux/highmem.h
... ... @@ -98,7 +98,8 @@
98 98  
99 99 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
100 100  
101   -static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
  101 +static inline void copy_user_highpage(struct page *to, struct page *from,
  102 + unsigned long vaddr, struct vm_area_struct *vma)
102 103 {
103 104 char *vfrom, *vto;
104 105  
... ... @@ -44,14 +44,14 @@
44 44 }
45 45  
46 46 static void copy_huge_page(struct page *dst, struct page *src,
47   - unsigned long addr)
  47 + unsigned long addr, struct vm_area_struct *vma)
48 48 {
49 49 int i;
50 50  
51 51 might_sleep();
52 52 for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
53 53 cond_resched();
54   - copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
  54 + copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
55 55 }
56 56 }
57 57  
... ... @@ -442,7 +442,7 @@
442 442 }
443 443  
444 444 spin_unlock(&mm->page_table_lock);
445   - copy_huge_page(new_page, old_page, address);
  445 + copy_huge_page(new_page, old_page, address, vma);
446 446 spin_lock(&mm->page_table_lock);
447 447  
448 448 ptep = huge_pte_offset(mm, address & HPAGE_MASK);
... ... @@ -1441,7 +1441,7 @@
1441 1441 return pte;
1442 1442 }
1443 1443  
1444   -static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
  1444 +static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
1445 1445 {
1446 1446 /*
1447 1447 * If the source page was a PFN mapping, we don't have
1448 1448  
... ... @@ -1464,9 +1464,9 @@
1464 1464 kunmap_atomic(kaddr, KM_USER0);
1465 1465 flush_dcache_page(dst);
1466 1466 return;
1467   -
  1467 +
1468 1468 }
1469   - copy_user_highpage(dst, src, va);
  1469 + copy_user_highpage(dst, src, va, vma);
1470 1470 }
1471 1471  
1472 1472 /*
... ... @@ -1577,7 +1577,7 @@
1577 1577 new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1578 1578 if (!new_page)
1579 1579 goto oom;
1580   - cow_user_page(new_page, old_page, address);
  1580 + cow_user_page(new_page, old_page, address, vma);
1581 1581 }
1582 1582  
1583 1583 /*
... ... @@ -2200,7 +2200,7 @@
2200 2200 page = alloc_page_vma(GFP_HIGHUSER, vma, address);
2201 2201 if (!page)
2202 2202 goto oom;
2203   - copy_user_highpage(page, new_page, address);
  2203 + copy_user_highpage(page, new_page, address, vma);
2204 2204 page_cache_release(new_page);
2205 2205 new_page = page;
2206 2206 anon = 1;