Commit e2cda322648122dc400c85ada80eaddbc612ef6a

Authored by Andrea Arcangeli
Committed by Linus Torvalds
1 parent 5f6e8da70a

thp: add pmd mangling generic functions

Some are needed to build but not actually used on archs not supporting
transparent hugepages.  Others like pmdp_clear_flush are used by x86 too.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 3 changed files with 278 additions and 61 deletions Side-by-side Diff

include/asm-generic/pgtable.h
... ... @@ -5,67 +5,108 @@
5 5 #ifdef CONFIG_MMU
6 6  
7 7 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
8   -/*
9   - * Largely same as above, but only sets the access flags (dirty,
10   - * accessed, and writable). Furthermore, we know it always gets set
11   - * to a "more permissive" setting, which allows most architectures
12   - * to optimize this. We return whether the PTE actually changed, which
13   - * in turn instructs the caller to do things like update__mmu_cache.
14   - * This used to be done in the caller, but sparc needs minor faults to
15   - * force that call on sun4c so we changed this macro slightly
16   - */
17   -#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
18   -({ \
19   - int __changed = !pte_same(*(__ptep), __entry); \
20   - if (__changed) { \
21   - set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
22   - flush_tlb_page(__vma, __address); \
23   - } \
24   - __changed; \
25   -})
  8 +extern int ptep_set_access_flags(struct vm_area_struct *vma,
  9 + unsigned long address, pte_t *ptep,
  10 + pte_t entry, int dirty);
26 11 #endif
27 12  
  13 +#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
  14 +extern int pmdp_set_access_flags(struct vm_area_struct *vma,
  15 + unsigned long address, pmd_t *pmdp,
  16 + pmd_t entry, int dirty);
  17 +#endif
  18 +
28 19 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
29   -#define ptep_test_and_clear_young(__vma, __address, __ptep) \
30   -({ \
31   - pte_t __pte = *(__ptep); \
32   - int r = 1; \
33   - if (!pte_young(__pte)) \
34   - r = 0; \
35   - else \
36   - set_pte_at((__vma)->vm_mm, (__address), \
37   - (__ptep), pte_mkold(__pte)); \
38   - r; \
39   -})
  20 +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
  21 + unsigned long address,
  22 + pte_t *ptep)
  23 +{
  24 + pte_t pte = *ptep;
  25 + int r = 1;
  26 + if (!pte_young(pte))
  27 + r = 0;
  28 + else
  29 + set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
  30 + return r;
  31 +}
40 32 #endif
41 33  
  34 +#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
  35 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
  36 +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
  37 + unsigned long address,
  38 + pmd_t *pmdp)
  39 +{
  40 + pmd_t pmd = *pmdp;
  41 + int r = 1;
  42 + if (!pmd_young(pmd))
  43 + r = 0;
  44 + else
  45 + set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
  46 + return r;
  47 +}
  48 +#else /* CONFIG_TRANSPARENT_HUGEPAGE */
  49 +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
  50 + unsigned long address,
  51 + pmd_t *pmdp)
  52 +{
  53 + BUG();
  54 + return 0;
  55 +}
  56 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  57 +#endif
  58 +
42 59 #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
43   -#define ptep_clear_flush_young(__vma, __address, __ptep) \
44   -({ \
45   - int __young; \
46   - __young = ptep_test_and_clear_young(__vma, __address, __ptep); \
47   - if (__young) \
48   - flush_tlb_page(__vma, __address); \
49   - __young; \
50   -})
  60 +int ptep_clear_flush_young(struct vm_area_struct *vma,
  61 + unsigned long address, pte_t *ptep);
51 62 #endif
52 63  
  64 +#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
  65 +int pmdp_clear_flush_young(struct vm_area_struct *vma,
  66 + unsigned long address, pmd_t *pmdp);
  67 +#endif
  68 +
53 69 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
54   -#define ptep_get_and_clear(__mm, __address, __ptep) \
55   -({ \
56   - pte_t __pte = *(__ptep); \
57   - pte_clear((__mm), (__address), (__ptep)); \
58   - __pte; \
  70 +static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
  71 + unsigned long address,
  72 + pte_t *ptep)
  73 +{
  74 + pte_t pte = *ptep;
  75 + pte_clear(mm, address, ptep);
  76 + return pte;
  77 +}
  78 +#endif
  79 +
  80 +#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
  81 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
  82 +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
  83 + unsigned long address,
  84 + pmd_t *pmdp)
  85 +{
  86 + pmd_t pmd = *pmdp;
  87 + pmd_clear(pmdp);
  88 + return pmd;
59   -})
  89 +}
  90 +#else /* CONFIG_TRANSPARENT_HUGEPAGE */
  91 +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
  92 + unsigned long address,
  93 + pmd_t *pmdp)
  94 +{
  95 + BUG();
  96 + return __pmd(0);
  97 +}
  98 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
60 99 #endif
61 100  
62 101 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
63   -#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
64   -({ \
65   - pte_t __pte; \
66   - __pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
67   - __pte; \
68   -})
  102 +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
  103 + unsigned long address, pte_t *ptep,
  104 + int full)
  105 +{
  106 + pte_t pte;
  107 + pte = ptep_get_and_clear(mm, address, ptep);
  108 + return pte;
  109 +}
69 110 #endif
70 111  
71 112 /*
72 113  
73 114  
... ... @@ -74,22 +115,27 @@
74 115 * not present, or in the process of an address space destruction.
75 116 */
76 117 #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
77   -#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
78   -do { \
79   - pte_clear((__mm), (__address), (__ptep)); \
80   -} while (0)
  118 +static inline void pte_clear_not_present_full(struct mm_struct *mm,
  119 + unsigned long address,
  120 + pte_t *ptep,
  121 + int full)
  122 +{
  123 + pte_clear(mm, address, ptep);
  124 +}
81 125 #endif
82 126  
83 127 #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
84   -#define ptep_clear_flush(__vma, __address, __ptep) \
85   -({ \
86   - pte_t __pte; \
87   - __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
88   - flush_tlb_page(__vma, __address); \
89   - __pte; \
90   -})
  128 +extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
  129 + unsigned long address,
  130 + pte_t *ptep);
91 131 #endif
92 132  
  133 +#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
  134 +extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
  135 + unsigned long address,
  136 + pmd_t *pmdp);
  137 +#endif
  138 +
93 139 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
94 140 struct mm_struct;
95 141 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
96 142  
97 143  
... ... @@ -99,10 +145,51 @@
99 145 }
100 146 #endif
101 147  
  148 +#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
  149 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
  150 +static inline void pmdp_set_wrprotect(struct mm_struct *mm,
  151 + unsigned long address, pmd_t *pmdp)
  152 +{
  153 + pmd_t old_pmd = *pmdp;
  154 + set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
  155 +}
  156 +#else /* CONFIG_TRANSPARENT_HUGEPAGE */
  157 +static inline void pmdp_set_wrprotect(struct mm_struct *mm,
  158 + unsigned long address, pmd_t *pmdp)
  159 +{
  160 + BUG();
  161 +}
  162 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  163 +#endif
  164 +
  165 +#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
  166 +extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
  167 + unsigned long address,
  168 + pmd_t *pmdp);
  169 +#endif
  170 +
102 171 #ifndef __HAVE_ARCH_PTE_SAME
103   -#define pte_same(A,B) (pte_val(A) == pte_val(B))
  172 +static inline int pte_same(pte_t pte_a, pte_t pte_b)
  173 +{
  174 + return pte_val(pte_a) == pte_val(pte_b);
  175 +}
104 176 #endif
105 177  
  178 +#ifndef __HAVE_ARCH_PMD_SAME
  179 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
  180 +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
  181 +{
  182 + return pmd_val(pmd_a) == pmd_val(pmd_b);
  183 +}
  184 +#else /* CONFIG_TRANSPARENT_HUGEPAGE */
  185 +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
  186 +{
  187 + BUG();
  188 + return 0;
  189 +}
  190 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  191 +#endif
  192 +
106 193 #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
107 194 #define page_test_dirty(page) (0)
108 195 #endif
... ... @@ -357,6 +444,13 @@
357 444 {
358 445 return 0;
359 446 }
  447 +#ifndef __HAVE_ARCH_PMD_WRITE
  448 +static inline int pmd_write(pmd_t pmd)
  449 +{
  450 + BUG();
  451 + return 0;
  452 +}
  453 +#endif /* __HAVE_ARCH_PMD_WRITE */
360 454 #endif
361 455  
362 456 #endif /* !__ASSEMBLY__ */
mm/Makefile
... ... @@ -5,7 +5,7 @@
5 5 mmu-y := nommu.o
6 6 mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
7 7 mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
8   - vmalloc.o pagewalk.o
  8 + vmalloc.o pagewalk.o pgtable-generic.o
9 9  
10 10 obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
11 11 maccess.o page_alloc.o page-writeback.o \
mm/pgtable-generic.c
  1 +/*
  2 + * mm/pgtable-generic.c
  3 + *
  4 + * Generic pgtable methods declared in asm-generic/pgtable.h
  5 + *
  6 + * Copyright (C) 2010 Linus Torvalds
  7 + */
  8 +
  9 +#include <asm/tlb.h>
  10 +#include <asm-generic/pgtable.h>
  11 +
  12 +#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
  13 +/*
  14 + * Only sets the access flags (dirty, accessed, and
  15 + * writable). Furthermore, we know it always gets set to a "more
  16 + * permissive" setting, which allows most architectures to optimize
  17 + * this. We return whether the PTE actually changed, which in turn
  18 + * instructs the caller to do things like update_mmu_cache. This
  19 + * used to be done in the caller, but sparc needs minor faults to
  20 + * force that call on sun4c so we changed this macro slightly
  21 + */
  22 +int ptep_set_access_flags(struct vm_area_struct *vma,
  23 + unsigned long address, pte_t *ptep,
  24 + pte_t entry, int dirty)
  25 +{
  26 + int changed = !pte_same(*ptep, entry);
  27 + if (changed) {
  28 + set_pte_at(vma->vm_mm, address, ptep, entry);
  29 + flush_tlb_page(vma, address);
  30 + }
  31 + return changed;
  32 +}
  33 +#endif
  34 +
  35 +#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
  36 +int pmdp_set_access_flags(struct vm_area_struct *vma,
  37 + unsigned long address, pmd_t *pmdp,
  38 + pmd_t entry, int dirty)
  39 +{
  40 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
  41 + int changed = !pmd_same(*pmdp, entry);
  42 + VM_BUG_ON(address & ~HPAGE_PMD_MASK);
  43 + if (changed) {
  44 + set_pmd_at(vma->vm_mm, address, pmdp, entry);
  45 + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
  46 + }
  47 + return changed;
  48 +#else /* CONFIG_TRANSPARENT_HUGEPAGE */
  49 + BUG();
  50 + return 0;
  51 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  52 +}
  53 +#endif
  54 +
  55 +#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
  56 +int ptep_clear_flush_young(struct vm_area_struct *vma,
  57 + unsigned long address, pte_t *ptep)
  58 +{
  59 + int young;
  60 + young = ptep_test_and_clear_young(vma, address, ptep);
  61 + if (young)
  62 + flush_tlb_page(vma, address);
  63 + return young;
  64 +}
  65 +#endif
  66 +
  67 +#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
  68 +int pmdp_clear_flush_young(struct vm_area_struct *vma,
  69 + unsigned long address, pmd_t *pmdp)
  70 +{
  71 + int young;
  72 +#ifndef CONFIG_TRANSPARENT_HUGEPAGE
  73 + BUG();
  74 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  75 + VM_BUG_ON(address & ~HPAGE_PMD_MASK);
  76 + young = pmdp_test_and_clear_young(vma, address, pmdp);
  77 + if (young)
  78 + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
  79 + return young;
  80 +}
  81 +#endif
  82 +
  83 +#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
  84 +pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
  85 + pte_t *ptep)
  86 +{
  87 + pte_t pte;
  88 + pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
  89 + flush_tlb_page(vma, address);
  90 + return pte;
  91 +}
  92 +#endif
  93 +
  94 +#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
  95 +pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
  96 + pmd_t *pmdp)
  97 +{
  98 + pmd_t pmd;
  99 +#ifndef CONFIG_TRANSPARENT_HUGEPAGE
  100 + BUG();
  101 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  102 + VM_BUG_ON(address & ~HPAGE_PMD_MASK);
  103 + pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
  104 + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
  105 + return pmd;
  106 +}
  107 +#endif
  108 +
  109 +#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
  110 +pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
  111 + pmd_t *pmdp)
  112 +{
  113 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
  114 + pmd_t pmd = pmd_mksplitting(*pmdp);
  115 + VM_BUG_ON(address & ~HPAGE_PMD_MASK);
  116 + set_pmd_at(vma->vm_mm, address, pmdp, pmd);
  117 + /* tlb flush only to serialize against gup-fast */
  118 + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
  119 + return pmd;
  120 +#else /* CONFIG_TRANSPARENT_HUGEPAGE */
  121 + BUG();
  122 + return __pmd(0);
  123 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  124 +}
  123 +#endif