Commit fe1a6875fcaaac2041945008a9875d2c07be1d9b
Committed by Linus Torvalds
1 parent de3b69d7d8
Exists in master and in 7 other branches
mm: fix build on non-mmu machines
Commit 1ea0704e0d aka "mm: add a ptep_modify_prot transaction abstraction" caused:

|  CC      init/main.o
|In file included from include2/asm/pgtable.h:68,
|                 from /home/bigeasy/git/linux-2.6-m68k/include/linux/mm.h:39,
|                 from include2/asm/uaccess.h:8,
|                 from /home/bigeasy/git/linux-2.6-m68k/include/linux/poll.h:13,
|                 from /home/bigeasy/git/linux-2.6-m68k/include/linux/rtc.h:113,
|                 from /home/bigeasy/git/linux-2.6-m68k/include/linux/efi.h:19,
|                 from /home/bigeasy/git/linux-2.6-m68k/init/main.c:43:
|/linux-2.6/include/asm-generic/pgtable.h: In function '__ptep_modify_prot_start':
|/linux-2.6/include/asm-generic/pgtable.h:209: error: implicit declaration of function 'ptep_get_and_clear'
|/linux-2.6/include/asm-generic/pgtable.h:209: error: incompatible types in return
|/linux-2.6/include/asm-generic/pgtable.h: In function '__ptep_modify_prot_commit':
|/linux-2.6/include/asm-generic/pgtable.h:220: error: implicit declaration of function 'set_pte_at'
|make[2]: *** [init/main.o] Error 1
|make[1]: *** [init] Error 2
|make: *** [sub-make] Error 2

on my m68knommu box.

Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Sebastian Siewior <bigeasy@linutronix.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
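The failure is structural: __ptep_modify_prot_start() and __ptep_modify_prot_commit() are built on ptep_get_and_clear() and set_pte_at(), and neither of those exists on a !CONFIG_MMU configuration, so the compiler reports implicit declarations. Moving the #endif /* CONFIG_MMU */ below the transaction helpers hides them from non-MMU builds entirely. A rough sketch of the guard layout after the one-line move in the diff (paraphrased, not a verbatim excerpt of the header):

#ifdef CONFIG_MMU
/* ... generic pte helpers, including ptep_get_and_clear() ... */

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
                                             unsigned long addr, pte_t *ptep)
{
        return ptep_get_and_clear(mm, addr, ptep);      /* MMU-only helper */
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
                                             unsigned long addr,
                                             pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);                /* MMU-only helper */
}

/* ... ptep_modify_prot_start()/ptep_modify_prot_commit() wrappers ... */
#endif /* CONFIG_MMU */  /* now closes after the transaction helpers */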
Showing 1 changed file with 1 addition and 1 deletion (inline diff)
include/asm-generic/pgtable.h
#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Largely same as above, but only sets the access flags (dirty,
 * accessed, and writable). Furthermore, we know it always gets set
 * to a "more permissive" setting, which allows most architectures
 * to optimize this. We return whether the PTE actually changed, which
 * in turn instructs the caller to do things like update__mmu_cache.
 * This used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
        int __changed = !pte_same(*(__ptep), __entry); \
        if (__changed) { \
                set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
                flush_tlb_page(__vma, __address); \
        } \
        __changed; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep) \
({ \
        pte_t __pte = *(__ptep); \
        int r = 1; \
        if (!pte_young(__pte)) \
                r = 0; \
        else \
                set_pte_at((__vma)->vm_mm, (__address), \
                           (__ptep), pte_mkold(__pte)); \
        r; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
        int __young; \
        __young = ptep_test_and_clear_young(__vma, __address, __ptep); \
        if (__young) \
                flush_tlb_page(__vma, __address); \
        __young; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
        pte_t __pte = *(__ptep); \
        pte_clear((__mm), (__address), (__ptep)); \
        __pte; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
({ \
        pte_t __pte; \
        __pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
        __pte; \
})
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTE's which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
do { \
        pte_clear((__mm), (__address), (__ptep)); \
} while (0)
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep) \
({ \
        pte_t __pte; \
        __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
        flush_tlb_page(__vma, __address); \
        __pte; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
        pte_t old_pte = *ptep;
        set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page) (0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page) do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte) pte_dirty(pte)
#else
#define pte_maybe_dirty(pte) (1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end) \
({      unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
        (__boundary - 1 < (end) - 1)? __boundary: (end); \
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end) \
({      unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
        (__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end) \
({      unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
        (__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#endif

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
        if (pgd_none(*pgd))
                return 1;
        if (unlikely(pgd_bad(*pgd))) {
                pgd_clear_bad(pgd);
                return 1;
        }
        return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
        if (pud_none(*pud))
                return 1;
        if (unlikely(pud_bad(*pud))) {
                pud_clear_bad(pud);
                return 1;
        }
        return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
        if (pmd_none(*pmd))
                return 1;
        if (unlikely(pmd_bad(*pmd))) {
                pmd_clear_bad(pmd);
                return 1;
        }
        return 0;
}
-#endif /* CONFIG_MMU */

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
                                             unsigned long addr,
                                             pte_t *ptep)
{
        /*
         * Get the current pte state, but zero it out to make it
         * non-present, preventing the hardware from asynchronously
         * updating it.
         */
        return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
                                             unsigned long addr,
                                             pte_t *ptep, pte_t pte)
{
        /*
         * The pte is non-present, so there's no hardware state to
         * preserve.
         */
        set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transation.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time. The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
                                           unsigned long addr,
                                           pte_t *ptep)
{
        return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                           unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        __ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads though
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date. This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified. In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode() do {} while (0)
#define arch_leave_lazy_mmu_mode() do {} while (0)
#define arch_flush_lazy_mmu_mode() do {} while (0)
#endif

/*
 * A facility to provide batching of the reload of page tables with the
 * actual context switch code for paravirtualized guests. By convention,
 * only one of the lazy modes (CPU, MMU) should be active at any given
 * time, entry should never be nested, and entry and exits should always
 * be paired. This is for sanity of maintaining and reasoning about the
 * kernel code.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode() do {} while (0)
#define arch_leave_lazy_cpu_mode() do {} while (0)
#define arch_flush_lazy_cpu_mode() do {} while (0)
#endif

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */
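For context on the interface that the moved #endif now guards: the header's own comment states that a ptep_modify_prot transaction must run under the pte lock and be committed before the lock is dropped. A minimal, hypothetical caller sketch (the function name and surrounding locking are assumptions for illustration, not code from this commit; pte_modify() is the usual arch-provided helper for changing protection bits):

/*
 * Illustration only: change the protection of one present pte using the
 * transaction interface defined above. The caller is assumed to already
 * hold the pte lock for this page table.
 */
static void change_one_pte_prot(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pgprot_t newprot)
{
        pte_t ptent;

        ptent = ptep_modify_prot_start(mm, addr, ptep); /* pte now non-present */
        ptent = pte_modify(ptent, newprot);             /* apply new protection */
        ptep_modify_prot_commit(mm, addr, ptep, ptent); /* or queued until unlock */
}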