Commit 2099597401c7710c00b0d7c32b24a44a193836e1

Authored by Alex Shi
Committed by Linus Torvalds
1 parent e30d539b3f

mm: move is_vma_temporary_stack() declaration to huge_mm.h

When transparent_hugepage_enabled() is used outside mm/, such as in
arch/x86/mm/tlb.c:

+       if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB
+                       || transparent_hugepage_enabled(vma)) {
+               flush_tlb_mm(vma->vm_mm);

is_vma_temporary_stack() is referenced by that macro but not declared in
huge_mm.h, so the build fails:

  arch/x86/mm/tlb.c: In function `flush_tlb_range':
  arch/x86/mm/tlb.c:324:4: error: implicit declaration of function `is_vma_temporary_stack' [-Werror=implicit-function-declaration]

Since is_vma_temporary_stack() is only used in rmap.c and huge_memory.c, it
is better to move its declaration from rmap.h to huge_mm.h to avoid such
errors.
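
For illustration only, a minimal sketch (not part of this patch) of why the
declaration must be visible from huge_mm.h: transparent_hugepage_enabled()
is a macro, so its expansion, including the call to
is_vma_temporary_stack(), is compiled into whichever file uses it, and that
file needs the prototype in scope.  The function example_flush() below is
hypothetical; the headers, flush_tlb_mm() and the macro are real:

        #include <linux/mm.h>           /* struct vm_area_struct */
        #include <linux/huge_mm.h>      /* transparent_hugepage_enabled() */
        #include <asm/tlbflush.h>       /* flush_tlb_mm() */

        static void example_flush(struct vm_area_struct *vma)
        {
                /*
                 * transparent_hugepage_enabled(vma) expands to an
                 * expression ending in !is_vma_temporary_stack(vma);
                 * without a prototype in scope gcc rejects the implicit
                 * declaration under -Werror.
                 */
                if (transparent_hugepage_enabled(vma))
                        flush_tlb_mm(vma->vm_mm);
        }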

Signed-off-by: Alex Shi <alex.shi@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 2 changed files with 2 additions and 2 deletions

include/linux/huge_mm.h
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
        struct vm_area_struct *vma,
        unsigned long address, pmd_t *pmd,
        unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
        struct vm_area_struct *vma);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long address, pmd_t *pmd,
        pmd_t orig_pmd);
extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
        unsigned long addr,
        pmd_t *pmd,
        unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
        struct vm_area_struct *vma,
        pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        unsigned long addr, unsigned long end,
        unsigned char *vec);
extern int move_huge_pmd(struct vm_area_struct *vma,
        struct vm_area_struct *new_vma,
        unsigned long old_addr,
        unsigned long new_addr, unsigned long old_end,
        pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        unsigned long addr, pgprot_t newprot);

enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

enum page_check_address_pmd_flag {
        PAGE_CHECK_ADDRESS_PMD_FLAG,
        PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
        PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
        struct mm_struct *mm,
        unsigned long address,
        enum page_check_address_pmd_flag flag);

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT HPAGE_SHIFT
#define HPAGE_PMD_MASK HPAGE_MASK
#define HPAGE_PMD_SIZE HPAGE_SIZE

+extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
+
#define transparent_hugepage_enabled(__vma) \
        ((transparent_hugepage_flags & \
          (1<<TRANSPARENT_HUGEPAGE_FLAG) || \
          (transparent_hugepage_flags & \
           (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \
           ((__vma)->vm_flags & VM_HUGEPAGE))) && \
         !((__vma)->vm_flags & VM_NOHUGEPAGE) && \
         !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma) \
        ((transparent_hugepage_flags & \
          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
         (transparent_hugepage_flags & \
          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \
          (__vma)->vm_flags & VM_HUGEPAGE))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow() \
        (transparent_hugepage_flags & \
         (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmd_t *dst_pmd, pmd_t *src_pmd,
        struct vm_area_struct *vma,
        unsigned long addr, unsigned long end);
extern int handle_pte_fault(struct mm_struct *mm,
        struct vm_area_struct *vma, unsigned long address,
        pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
#define split_huge_page_pmd(__mm, __pmd) \
        do { \
                pmd_t *____pmd = (__pmd); \
                if (unlikely(pmd_trans_huge(*____pmd))) \
                        __split_huge_page_pmd(__mm, ____pmd); \
        } while (0)
#define wait_split_huge_page(__anon_vma, __pmd) \
        do { \
                pmd_t *____pmd = (__pmd); \
                anon_vma_lock(__anon_vma); \
                anon_vma_unlock(__anon_vma); \
                BUG_ON(pmd_trans_splitting(*____pmd) || \
                       pmd_trans_huge(*____pmd)); \
        } while (0)
#if HPAGE_PMD_ORDER > MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
        unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
        unsigned long start,
        unsigned long end,
        long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd,
        struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline int pmd_trans_huge_lock(pmd_t *pmd,
        struct vm_area_struct *vma)
{
        VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
        if (pmd_trans_huge(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
        unsigned long start,
        unsigned long end,
        long adjust_next)
{
        if (!vma->anon_vma || vma->vm_ops)
                return;
        __vma_adjust_trans_huge(vma, start, end, adjust_next);
}
static inline int hpage_nr_pages(struct page *page)
{
        if (unlikely(PageTransHuge(page)))
                return HPAGE_PMD_NR;
        return 1;
}
static inline struct page *compound_trans_head(struct page *page)
{
        if (PageTail(page)) {
                struct page *head;
                head = page->first_page;
                smp_rmb();
                /*
                 * head may be a dangling pointer.
                 * __split_huge_page_refcount clears PageTail before
                 * overwriting first_page, so if PageTail is still
                 * there it means the head pointer isn't dangling.
                 */
                if (PageTail(page))
                        return head;
        }
        return page;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int split_huge_page(struct page *page)
{
        return 0;
}
#define split_huge_page_pmd(__mm, __pmd) \
        do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd) \
        do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
        unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
        unsigned long start,
        unsigned long end,
        long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd,
        struct vm_area_struct *vma)
{
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */

include/linux/rmap.h
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;  /* Root of this anon_vma tree */
        struct mutex mutex;     /* Serialize access to vma list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma of page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for clearing up the
         * anon_vma if they are the last user on release
         */
        atomic_t refcount;

        /*
         * NOTE: the LSB of the head.next is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * head must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB bit itself
         * is serialized by a system wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */
        struct list_head head;  /* Chain of private "related" vmas */
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "same_anon_vma" list contains the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;      /* locked by mmap_sem & page_table_lock */
        struct list_head same_anon_vma; /* locked by anon_vma->mutex */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}

static inline struct anon_vma *page_anon_vma(struct page *page)
{
        if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
                                            PAGE_MAPPING_ANON)
                return NULL;
        return page_rmapping(page);
}

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                mutex_lock(&anon_vma->root->mutex);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                mutex_unlock(&anon_vma->root->mutex);
}

static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
        mutex_lock(&anon_vma->root->mutex);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
        mutex_unlock(&anon_vma->root->mutex);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_moveto_tail(struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
        struct vm_area_struct *next)
{
        VM_BUG_ON(vma->anon_vma != next->anon_vma);
        unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
        unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
        unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
        unsigned long);

static inline void page_dup_rmap(struct page *page)
{
        atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
        struct mem_cgroup *memcg, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
        unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);

enum ttu_flags {
        TTU_UNMAP = 0,                  /* unmap mode */
        TTU_MIGRATION = 1,              /* migration mode */
        TTU_MUNLOCK = 2,                /* munlock mode */
        TTU_ACTION_MASK = 0xff,

        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
        TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

-bool is_vma_temporary_stack(struct vm_area_struct *vma);
-
int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
        unsigned long address, enum ttu_flags flags);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
        unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
        unsigned long address,
        spinlock_t **ptlp, int sync)
{
        pte_t *ptep;

        __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
                                                       ptlp, sync));
        return ptep;
}

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
        struct vm_area_struct *, unsigned long, void *), void *arg);

#else /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
        struct mem_cgroup *memcg,
        unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
        return 0;
}


#endif /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS    0
#define SWAP_AGAIN      1
#define SWAP_FAIL       2
#define SWAP_MLOCK      3

#endif /* _LINUX_RMAP_H */
