Commit 6729cf7967f6c11f6de6a0b43ec277905a00c146
Committed by: David S. Miller
Parent: 1ec8cf6233
sparc32: introduce run-time patching of srmmu access functions

LEON uses a different ASI than SUN for MMUREGS.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Cc: Daniel Hellstrom <daniel@gaisler.com>
Cc: Konrad Eisele <konrad@gaisler.com>
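For callers the change is invisible: the accessors keep their C prototypes and merely move from static inline functions in pgtsrmmu.h to out-of-line assembler routines that get patched once at boot. A minimal sketch of caller code, unaffected by the move (the function name here is hypothetical, only srmmu_get_mmureg() and srmmu_get_context() come from the header):

	#include <asm/pgtsrmmu.h>

	/* Hypothetical caller: compiles identically before and after this
	 * commit, since only the definitions moved, not the prototypes. */
	static unsigned int example_read_mmu_state(void)
	{
		unsigned int ctrl = srmmu_get_mmureg();	/* now defined in srmmu_access.S */
		int ctx = srmmu_get_context();

		return ctrl ^ (unsigned int)ctx;	/* arbitrary combination for the example */
	}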
Showing 3 changed files with 90 additions and 61 deletions
arch/sparc/include/asm/pgtsrmmu.h
/*
 * pgtsrmmu.h: SRMMU page table defines and code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_PGTSRMMU_H
#define _SPARC_PGTSRMMU_H

#include <asm/page.h>

#ifdef __ASSEMBLY__
#include <asm/thread_info.h>	/* TI_UWINMASK for WINDOW_FLUSH */
#endif

/* Number of contexts is implementation-dependent; 64k is the most we support */
#define SRMMU_MAX_CONTEXTS	65536

/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
#define SRMMU_REAL_PMD_SHIFT		18
#define SRMMU_REAL_PMD_SIZE		(1UL << SRMMU_REAL_PMD_SHIFT)
#define SRMMU_REAL_PMD_MASK		(~(SRMMU_REAL_PMD_SIZE-1))
#define SRMMU_REAL_PMD_ALIGN(__addr)	(((__addr)+SRMMU_REAL_PMD_SIZE-1)&SRMMU_REAL_PMD_MASK)

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SRMMU_PGDIR_SHIFT	24
#define SRMMU_PGDIR_SIZE	(1UL << SRMMU_PGDIR_SHIFT)
#define SRMMU_PGDIR_MASK	(~(SRMMU_PGDIR_SIZE-1))
#define SRMMU_PGDIR_ALIGN(addr)	(((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)

#define SRMMU_REAL_PTRS_PER_PTE	64
#define SRMMU_REAL_PTRS_PER_PMD	64
#define SRMMU_PTRS_PER_PGD	256

#define SRMMU_REAL_PTE_TABLE_SIZE	(SRMMU_REAL_PTRS_PER_PTE*4)
#define SRMMU_PMD_TABLE_SIZE		(SRMMU_REAL_PTRS_PER_PMD*4)
#define SRMMU_PGD_TABLE_SIZE		(SRMMU_PTRS_PER_PGD*4)

/*
 * To support pagetables in highmem, Linux introduces APIs which
 * return struct page* and generally manipulate page tables when
 * they are not mapped into kernel space. Our hardware page tables
 * are smaller than pages. We lump hardware tables into big, page sized
 * software tables.
 *
 * PMD_SHIFT determines the size of the area a second-level page table entry
 * can map, and our pmd_t is 16 times larger than normal. The values which
 * were once defined here are now generic for 4c and srmmu, so they're
 * found in pgtable.h.
 */
#define SRMMU_PTRS_PER_PMD	4

/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK		0x3
#define SRMMU_ET_INVALID	0x0
#define SRMMU_ET_PTD		0x1
#define SRMMU_ET_PTE		0x2
#define SRMMU_ET_REPTE		0x3	/* AIEEE, SuperSparc II reverse endian page! */

/* Physical page extraction from PTP's and PTE's. */
#define SRMMU_CTX_PMASK		0xfffffff0
#define SRMMU_PTD_PMASK		0xfffffff0
#define SRMMU_PTE_PMASK		0xffffff00

/* The pte non-page bits. Some notes:
 * 1) cache, dirty, valid, and ref are frobbable
 *    for both supervisor and user pages.
 * 2) exec and write will only give the desired effect
 *    on user pages
 * 3) use priv and priv_readonly for changing the
 *    characteristics of supervisor ptes
 */
#define SRMMU_CACHE		0x80
#define SRMMU_DIRTY		0x40
#define SRMMU_REF		0x20
#define SRMMU_NOREAD		0x10
#define SRMMU_EXEC		0x08
#define SRMMU_WRITE		0x04
#define SRMMU_VALID		0x02	/* SRMMU_ET_PTE */
#define SRMMU_PRIV		0x1c
#define SRMMU_PRIV_RDONLY	0x18

#define SRMMU_FILE		0x40	/* Implemented in software */

#define SRMMU_PTE_FILE_SHIFT	8	/* == 32-PTE_FILE_MAX_BITS */

#define SRMMU_CHG_MASK		(0xffffff00 | SRMMU_REF | SRMMU_DIRTY)

/* SRMMU swap entry encoding
 *
 * We use 5 bits for the type and 19 for the offset. This gives us
 * 32 swapfiles of 4GB each. Encoding looks like:
 *
 * oooooooooooooooooootttttRRRRRRRR
 * fedcba9876543210fedcba9876543210
 *
 * The bottom 8 bits are reserved for protection and status bits, especially
 * FILE and PRESENT.
 */
#define SRMMU_SWP_TYPE_MASK	0x1f
#define SRMMU_SWP_TYPE_SHIFT	SRMMU_PTE_FILE_SHIFT
#define SRMMU_SWP_OFF_MASK	0x7ffff
#define SRMMU_SWP_OFF_SHIFT	(SRMMU_PTE_FILE_SHIFT + 5)

/* Some day I will implement true fine grained access bits for
 * user pages because the SRMMU gives us the capabilities to
 * enforce all the protection levels that vma's can have.
 * XXX But for now...
 */
#define SRMMU_PAGE_NONE		__pgprot(SRMMU_CACHE | \
					 SRMMU_PRIV | SRMMU_REF)
#define SRMMU_PAGE_SHARED	__pgprot(SRMMU_VALID | SRMMU_CACHE | \
					 SRMMU_EXEC | SRMMU_WRITE | SRMMU_REF)
#define SRMMU_PAGE_COPY		__pgprot(SRMMU_VALID | SRMMU_CACHE | \
					 SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_RDONLY	__pgprot(SRMMU_VALID | SRMMU_CACHE | \
					 SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_KERNEL	__pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
					 SRMMU_DIRTY | SRMMU_REF)

/* SRMMU Register addresses in ASI 0x4. These are valid for all
 * current SRMMU implementations that exist.
 */
#define SRMMU_CTRL_REG		0x00000000
#define SRMMU_CTXTBL_PTR	0x00000100
#define SRMMU_CTX_REG		0x00000200
#define SRMMU_FAULT_STATUS	0x00000300
#define SRMMU_FAULT_ADDR	0x00000400

#define WINDOW_FLUSH(tmp1, tmp2)			\
	mov	0, tmp1;				\
98:	ld	[%g6 + TI_UWINMASK], tmp2;		\
	orcc	%g0, tmp2, %g0;				\
	add	tmp1, 1, tmp1;				\
	bne	98b;					\
	 save	%sp, -64, %sp;				\
99:	subcc	tmp1, 1, tmp1;				\
	bne	99b;					\
	 restore %g0, %g0, %g0;

#ifndef __ASSEMBLY__
extern unsigned long last_valid_pfn;

/* This makes sense. Honest it does - Anton */
/* XXX Yes but it's ugly as sin. FIXME. -KMW */
extern void *srmmu_nocache_pool;
#define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
#define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))

/* Accessing the MMU control register. */
-static inline unsigned int srmmu_get_mmureg(void)
-{
-	unsigned int retval;
-	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
-			     "=r" (retval) :
-			     "i" (ASI_M_MMUREGS));
-	return retval;
-}
-
-static inline void srmmu_set_mmureg(unsigned long regval)
-{
-	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
-			     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
-
-}
-
-static inline void srmmu_set_ctable_ptr(unsigned long paddr)
-{
-	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
-	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-			     "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
-			     "i" (ASI_M_MMUREGS) :
-			     "memory");
-}
-
-static inline void srmmu_set_context(int context)
-{
-	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-			     "r" (context), "r" (SRMMU_CTX_REG),
-			     "i" (ASI_M_MMUREGS) : "memory");
-}
-
-static inline int srmmu_get_context(void)
-{
-	register int retval;
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (SRMMU_CTX_REG),
-			     "i" (ASI_M_MMUREGS));
-	return retval;
-}
-
-static inline unsigned int srmmu_get_fstatus(void)
-{
-	unsigned int retval;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
-	return retval;
-}
-
-static inline unsigned int srmmu_get_faddr(void)
-{
-	unsigned int retval;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
-	return retval;
-}
+unsigned int srmmu_get_mmureg(void);
+void srmmu_set_mmureg(unsigned long regval);
+void srmmu_set_ctable_ptr(unsigned long paddr);
+void srmmu_set_context(int context);
+int srmmu_get_context(void);
+unsigned int srmmu_get_fstatus(void);
+unsigned int srmmu_get_faddr(void);

/* This is guaranteed on all SRMMU's. */
static inline void srmmu_flush_whole_tlb(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
			     "r" (0x400),	/* Flush entire TLB!! */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");

}

/* These flush types are not available on all chips... */
#ifndef CONFIG_SPARC_LEON
static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
{
	unsigned long retval;

	vaddr &= PAGE_MASK;
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));

	return retval;
}
#else
#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0)
#endif

static inline int
srmmu_get_pte (unsigned long addr)
{
	register unsigned long entry;

	__asm__ __volatile__("\n\tlda [%1] %2,%0\n\t" :
			     "=r" (entry):
			     "r" ((addr & 0xfffff000) | 0x400), "i" (ASI_M_FLUSH_PROBE));
	return entry;
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC_PGTSRMMU_H) */
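As a side note on the unchanged context above: the swap-entry layout documented in pgtsrmmu.h (5 type bits, 19 offset bits, low 8 bits reserved for protection/status) packs as follows. This is a worked sketch only, assuming the SRMMU_SWP_* constants from the header; the helper name is hypothetical (the real wrappers live elsewhere in the sparc32 headers):

	/* Hypothetical helper illustrating the documented bit layout:
	 * oooooooooooooooooootttttRRRRRRRR */
	static inline unsigned long srmmu_swp_pack_example(unsigned long type,
							   unsigned long offset)
	{
		return ((type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT) |
		       ((offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT);
	}

	/* e.g. srmmu_swp_pack_example(3, 0x12345) == 0x2468a300:
	 * type 3 lands in bits 8-12, offset 0x12345 in bits 13-31,
	 * and the low 8 protection/status bits stay clear. */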
arch/sparc/mm/Makefile
# Makefile for the linux Sparc-specific parts of the memory manager.
#

asflags-y := -ansi
ccflags-y := -Werror

obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
obj-y                   += fault_$(BITS).o
obj-y                   += init_$(BITS).o
obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += srmmu_access.o
obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
obj-$(CONFIG_SPARC32)   += leon_mm.o

# Only used by sparc64
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o

# Only used by sparc32
obj-$(CONFIG_HIGHMEM) += highmem.o
arch/sparc/mm/srmmu_access.S (new file)

/* Assembler variants of srmmu access functions.
 * Implemented in assembler to allow run-time patching.
 * LEON uses a different ASI for MMUREGS than SUN.
 *
 * The leon_1insn_patch infrastructure is used
 * for the run-time patching.
 */

#include <linux/linkage.h>

#include <asm/asmmacro.h>
#include <asm/pgtsrmmu.h>
#include <asm/asi.h>

/* unsigned int srmmu_get_mmureg(void) */
ENTRY(srmmu_get_mmureg)
	LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %o0)
	SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %o0)
	retl
	 nop
ENDPROC(srmmu_get_mmureg)

/* void srmmu_set_mmureg(unsigned long regval) */
ENTRY(srmmu_set_mmureg)
	LEON_PI(sta	%o0, [%g0] ASI_LEON_MMUREGS)
	SUN_PI_(sta	%o0, [%g0] ASI_M_MMUREGS)
	retl
	 nop
ENDPROC(srmmu_set_mmureg)

/* void srmmu_set_ctable_ptr(unsigned long paddr) */
ENTRY(srmmu_set_ctable_ptr)
	/* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */
	srl	%o0, 4, %g1
	and	%g1, SRMMU_CTX_PMASK, %g1

	mov	SRMMU_CTXTBL_PTR, %g2
	LEON_PI(sta	%g1, [%g2] ASI_LEON_MMUREGS)
	SUN_PI_(sta	%g1, [%g2] ASI_M_MMUREGS)
	retl
	 nop
ENDPROC(srmmu_set_ctable_ptr)


/* void srmmu_set_context(int context) */
ENTRY(srmmu_set_context)
	mov	SRMMU_CTX_REG, %g1
	LEON_PI(sta	%o0, [%g1] ASI_LEON_MMUREGS)
	SUN_PI_(sta	%o0, [%g1] ASI_M_MMUREGS)
	retl
	 nop
ENDPROC(srmmu_set_context)


/* int srmmu_get_context(void) */
ENTRY(srmmu_get_context)
	mov	SRMMU_CTX_REG, %o0
	LEON_PI(lda	[%o0] ASI_LEON_MMUREGS, %o0)
	SUN_PI_(lda	[%o0] ASI_M_MMUREGS, %o0)
	retl
	 nop
ENDPROC(srmmu_get_context)


/* unsigned int srmmu_get_fstatus(void) */
ENTRY(srmmu_get_fstatus)
	mov	SRMMU_FAULT_STATUS, %o0
	LEON_PI(lda	[%o0] ASI_LEON_MMUREGS, %o0)
	SUN_PI_(lda	[%o0] ASI_M_MMUREGS, %o0)
	retl
	 nop
ENDPROC(srmmu_get_fstatus)


/* unsigned int srmmu_get_faddr(void) */
ENTRY(srmmu_get_faddr)
	mov	SRMMU_FAULT_ADDR, %o0
	LEON_PI(lda	[%o0] ASI_LEON_MMUREGS, %o0)
	SUN_PI_(lda	[%o0] ASI_M_MMUREGS, %o0)
	retl
	 nop
ENDPROC(srmmu_get_faddr)