Commit 9535239f6bc99f68e0cfae44505ad402b53ed24c
Committed by: Linus Torvalds
Parent: 73c59afc65
Exists in: master and 39 other branches
changing include/asm-generic/pgtable.h for non-mmu
There are some parts of include/asm-generic/pgtable.h that are relevant to the non-mmu architectures. To make it easier to include this header from them I would like to ifdef the relevant parts. Without this, there are a handful of functions referenced in here that are not defined on many non-mmu architectures. They could be defined out, of course, as an alternative approach.

Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
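As a minimal sketch of what this enables (the architecture name and path are hypothetical, not part of the commit), a non-mmu port could then include the generic header directly:

/* arch/fooarch/include/asm/pgtable.h -- hypothetical non-mmu port */
#ifndef _FOOARCH_PGTABLE_H
#define _FOOARCH_PGTABLE_H

/*
 * With CONFIG_MMU unset, everything inside the new #ifdef CONFIG_MMU
 * block of asm-generic/pgtable.h drops out, so the pte/pmd helpers it
 * references never need to be defined here; only the always-available
 * parts, such as the lazy MMU/CPU mode stubs, are picked up.
 */
#include <asm-generic/pgtable.h>

#endif /* _FOOARCH_PGTABLE_H */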
Showing 1 changed file with 38 additions and 35 deletions.
include/asm-generic/pgtable.h
@@ -2,6 +2,7 @@
 #define _ASM_GENERIC_PGTABLE_H
 
 #ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
@@ -133,41 +134,6 @@
 #endif
 
 /*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads though
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode()	do {} while (0)
-#define arch_leave_lazy_mmu_mode()	do {} while (0)
-#define arch_flush_lazy_mmu_mode()	do {} while (0)
-#endif
-
-/*
- * A facility to provide batching of the reload of page tables with the
- * actual context switch code for paravirtualized guests. By convention,
- * only one of the lazy modes (CPU, MMU) should be active at any given
- * time, entry should never be nested, and entry and exits should always
- * be paired. This is for sanity of maintaining and reasoning about the
- * kernel code.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode()	do {} while (0)
-#define arch_leave_lazy_cpu_mode()	do {} while (0)
-#define arch_flush_lazy_cpu_mode()	do {} while (0)
-#endif
-
-/*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier. Although no
  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
@@ -233,6 +199,43 @@
 	}
 	return 0;
 }
+#endif /* CONFIG_MMU */
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads though
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests. By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entry and exits should always
+ * be paired. This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode()	do {} while (0)
+#define arch_leave_lazy_cpu_mode()	do {} while (0)
+#define arch_flush_lazy_cpu_mode()	do {} while (0)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
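For context on the block that was moved: the lazy MMU hooks are meant to bracket a run of PTE updates made under the page table lock, so a hypervisor can batch them. A sketch of the usage pattern follows; the function and its variables are illustrative, loosely modeled on the remap loops in mm/memory.c, and only the arch_*_lazy_mmu_mode() hooks come from this header:

static void set_ptes_batched(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, unsigned long end,
			     unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);	/* hold the PT lock throughout */
	arch_enter_lazy_mmu_mode();	/* start batching PTE writes */
	do {
		set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;	/* never read back through *pte here: avoids the read hazard */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();	/* flush the delayed page table writes */
	pte_unmap_unlock(pte - 1, ptl);
}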
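Similarly, the #ifndef guards exist so that a paravirtualized architecture can supply real hooks in place of the empty stubs. A hypothetical override (the pv_lazy_* names are invented for illustration):

/* In a hypothetical arch's asm/pgtable.h, before the generic include: */
#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode()	pv_lazy_cpu_enter()	/* begin batching */
#define arch_leave_lazy_cpu_mode()	pv_lazy_cpu_leave()	/* flush and end; must pair with enter, never nest */
#define arch_flush_lazy_cpu_mode()	pv_lazy_cpu_flush()	/* flush without leaving lazy mode */

#include <asm-generic/pgtable.h>	/* the stub definitions are now skipped */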