Commit 30c9f3a9fae79517bca595826a19c6855fbb6d32

Author: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 232086b199

Remove internal use of 'write_access' in mm/memory.c

The fault handling routines really want more fine-grained flags than a
single "was it a write fault" boolean - the callers will want to set
flags like "you can return a retry error" etc.

And that's actually how the VM works internally, but right now the
top-level fault handling functions in mm/memory.c all pass just the
'write_access' boolean around.

This switches them over to pass around the FAULT_FLAG_xyzzy 'flags'
variable instead.  The 'write_access' calling convention still exists
for the exported 'handle_mm_fault()' function, but that is next.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

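For context, a minimal sketch of the flag convention the patch switches to. It assumes the two FAULT_FLAG_* definitions present in include/linux/mm.h at this point; the values are shown to illustrate the bitmask idiom, and the "retry" flag mentioned above does not exist yet:

        #define FAULT_FLAG_WRITE        0x01    /* Fault was a write access */
        #define FAULT_FLAG_NONLINEAR    0x02    /* Fault was via a nonlinear mapping */

        /* The boolean is converted exactly once, at the exported boundary: */
        unsigned int flags = write_access ? FAULT_FLAG_WRITE : 0;

        /* Inside the VM, flags compose without growing the argument lists: */
        flags |= FAULT_FLAG_NONLINEAR;          /* set a flag */
        if (flags & FAULT_FLAG_WRITE)           /* test a flag */
                flags &= ~FAULT_FLAG_WRITE;     /* clear a flag */
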
 mm/memory.c | 42 +++++++++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

--- a/mm/memory.c
+++ b/mm/memory.c

@@ -2496,7 +2496,7 @@
  */
 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 unsigned long address, pte_t *page_table, pmd_t *pmd,
-                int write_access, pte_t orig_pte)
+                unsigned int flags, pte_t orig_pte)
 {
         spinlock_t *ptl;
         struct page *page;
@@ -2572,9 +2572,9 @@

         inc_mm_counter(mm, anon_rss);
         pte = mk_pte(page, vma->vm_page_prot);
-        if (write_access && reuse_swap_page(page)) {
+        if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-                write_access = 0;
+                flags &= ~FAULT_FLAG_WRITE;
         }
         flush_icache_page(vma, page);
         set_pte_at(mm, address, page_table, pte);
@@ -2587,7 +2587,7 @@
         try_to_free_swap(page);
         unlock_page(page);

-        if (write_access) {
+        if (flags & FAULT_FLAG_WRITE) {
                 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
                 if (ret & VM_FAULT_ERROR)
                         ret &= VM_FAULT_ERROR;
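
The two do_swap_page() hunks above work together: when reuse_swap_page() reports the page is exclusively ours, the write fault is satisfied on the spot, the pte is mapped writable, and FAULT_FLAG_WRITE is cleared so the do_wp_page() fallback further down the function is skipped. A condensed sketch of that control flow, paraphrased from the hunks above rather than a complete listing:

        pte = mk_pte(page, vma->vm_page_prot);
        if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
                /* Write fault already satisfied: map writable, drop the flag. */
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                flags &= ~FAULT_FLAG_WRITE;
        }
        /* ... */
        if (flags & FAULT_FLAG_WRITE) {
                /* Flag survived: take the copy-on-write slow path. */
                ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
                if (ret & VM_FAULT_ERROR)
                        ret &= VM_FAULT_ERROR;  /* keep only the error bits */
        }
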
@@ -2616,7 +2616,7 @@
  */
 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 unsigned long address, pte_t *page_table, pmd_t *pmd,
-                int write_access)
+                unsigned int flags)
 {
         struct page *page;
         spinlock_t *ptl;
@@ -2776,7 +2776,7 @@
          * due to the bad i386 page protection. But it's valid
          * for other architectures too.
          *
-         * Note that if write_access is true, we either now have
+         * Note that if FAULT_FLAG_WRITE is set, we either now have
          * an exclusive copy of the page, or this is a shared mapping,
          * so we can make it writable and dirty to avoid having to
          * handle that later.
@@ -2847,11 +2847,10 @@

 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 unsigned long address, pte_t *page_table, pmd_t *pmd,
-                int write_access, pte_t orig_pte)
+                unsigned int flags, pte_t orig_pte)
 {
         pgoff_t pgoff = (((address & PAGE_MASK)
                         - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-        unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);

         pte_unmap(page_table);
         return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
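
The pgoff expression above converts the faulting address into a page offset within the backing file: page-align the address, subtract the vma start, convert bytes to pages, then add the mapping's starting file page. A worked example with hypothetical values, assuming 4 KiB pages (PAGE_SHIFT == 12):

        /* vma->vm_start == 0x08000000, vma->vm_pgoff == 16,
         * faulting address == 0x08005c10 */
        pgoff = (((0x08005c10 & PAGE_MASK)      /* 0x08005000: page-aligned */
                        - 0x08000000)           /* 0x5000 bytes into the vma */
                        >> PAGE_SHIFT)          /* 5 pages into the vma */
                        + 16;                   /* = 21 pages into the file */
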
@@ -2868,12 +2867,12 @@
  */
 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 unsigned long address, pte_t *page_table, pmd_t *pmd,
-                int write_access, pte_t orig_pte)
+                unsigned int flags, pte_t orig_pte)
 {
-        unsigned int flags = FAULT_FLAG_NONLINEAR |
-                                (write_access ? FAULT_FLAG_WRITE : 0);
         pgoff_t pgoff;

+        flags |= FAULT_FLAG_NONLINEAR;
+
         if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
                 return 0;

@@ -2904,7 +2903,7 @@
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
                 struct vm_area_struct *vma, unsigned long address,
-                pte_t *pte, pmd_t *pmd, int write_access)
+                pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
         pte_t entry;
         spinlock_t *ptl;
@@ -2915,30 +2914,30 @@
                         if (vma->vm_ops) {
                                 if (likely(vma->vm_ops->fault))
                                         return do_linear_fault(mm, vma, address,
-                                                pte, pmd, write_access, entry);
+                                                pte, pmd, flags, entry);
                         }
                         return do_anonymous_page(mm, vma, address,
-                                                 pte, pmd, write_access);
+                                                 pte, pmd, flags);
                 }
                 if (pte_file(entry))
                         return do_nonlinear_fault(mm, vma, address,
-                                        pte, pmd, write_access, entry);
+                                        pte, pmd, flags, entry);
                 return do_swap_page(mm, vma, address,
-                                        pte, pmd, write_access, entry);
+                                        pte, pmd, flags, entry);
         }

         ptl = pte_lockptr(mm, pmd);
         spin_lock(ptl);
         if (unlikely(!pte_same(*pte, entry)))
                 goto unlock;
-        if (write_access) {
+        if (flags & FAULT_FLAG_WRITE) {
                 if (!pte_write(entry))
                         return do_wp_page(mm, vma, address,
                                         pte, pmd, ptl, entry);
                 entry = pte_mkdirty(entry);
         }
         entry = pte_mkyoung(entry);
-        if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
+        if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
                 update_mmu_cache(vma, address, entry);
         } else {
                 /*
@@ -2947,7 +2946,7 @@
                  * This still avoids useless tlb flushes for .text page faults
                  * with threads.
                  */
-                if (write_access)
+                if (flags & FAULT_FLAG_WRITE)
                         flush_tlb_page(vma, address);
         }
 unlock:
@@ -2965,13 +2964,14 @@
         pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
+        unsigned int flags = write_access ? FAULT_FLAG_WRITE : 0;

         __set_current_state(TASK_RUNNING);

         count_vm_event(PGFAULT);

         if (unlikely(is_vm_hugetlb_page(vma)))
-                return hugetlb_fault(mm, vma, address, write_access);
+                return hugetlb_fault(mm, vma, address, flags);

         pgd = pgd_offset(mm, address);
         pud = pud_alloc(mm, pgd, address);
@@ -2984,7 +2984,7 @@
         if (!pte)
                 return VM_FAULT_OOM;

-        return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }

 #ifndef __PAGETABLE_PUD_FOLDED
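
As the commit message notes, the exported handle_mm_fault() still takes the write_access boolean, so arch fault handlers are untouched by this patch. A sketch of a typical call site, with names and surrounding details illustrative rather than taken from any particular architecture:

        /* in an arch page-fault handler (e.g. arch/*/mm/fault.c) */
        int write = ...;        /* derived from the hardware error code */
        int fault;

        fault = handle_mm_fault(mm, vma, address, write);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                /* ... deliver SIGBUS / handle OOM ... */
        }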