Commit 06d43c808d61580d977526deca328e33382b40c8
Committed by
Tom Rini
1 parent
2b373cb83c
Exists in
v2017.01-smarct4x
and in
25 other branches
arm: Set TTB XN bit in case DCACHE_OFF for LPAE mode
When we initially set up the MMU, set_section_dcache() is called with the DCACHE_OFF flag. In non-LPAE mode the DCACHE_OFF macro correctly has TTB_SECT_XN_MASK set, so the whole 4GB address space is marked execute-never (XN). In LPAE mode, however, the XN (execute-never) bit was not included in DCACHE_OFF, leaving all regions executable by default. This allowed random speculative instruction fetches from arbitrary memory regions, which was eventually caught by the kernel omap-l3-noc driver. Fix this by marking such regions XN by default. Signed-off-by: Keerthy <j-keerthy@ti.com> Reviewed-by: Alexander Graf <agraf@suse.de> Reviewed-by: Tom Rini <trini@konsulko.com>
Showing 2 changed files with 6 additions and 1 deletion (inline diff)
arch/arm/include/asm/system.h
1 | #ifndef __ASM_ARM_SYSTEM_H | 1 | #ifndef __ASM_ARM_SYSTEM_H |
2 | #define __ASM_ARM_SYSTEM_H | 2 | #define __ASM_ARM_SYSTEM_H |
3 | 3 | ||
4 | #include <common.h> | 4 | #include <common.h> |
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
6 | #include <asm/barriers.h> | 6 | #include <asm/barriers.h> |
7 | 7 | ||
8 | #ifdef CONFIG_ARM64 | 8 | #ifdef CONFIG_ARM64 |
9 | 9 | ||
10 | /* | 10 | /* |
11 | * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions | 11 | * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions |
12 | */ | 12 | */ |
13 | #define CR_M (1 << 0) /* MMU enable */ | 13 | #define CR_M (1 << 0) /* MMU enable */ |
14 | #define CR_A (1 << 1) /* Alignment abort enable */ | 14 | #define CR_A (1 << 1) /* Alignment abort enable */ |
15 | #define CR_C (1 << 2) /* Dcache enable */ | 15 | #define CR_C (1 << 2) /* Dcache enable */ |
16 | #define CR_SA (1 << 3) /* Stack Alignment Check Enable */ | 16 | #define CR_SA (1 << 3) /* Stack Alignment Check Enable */ |
17 | #define CR_I (1 << 12) /* Icache enable */ | 17 | #define CR_I (1 << 12) /* Icache enable */ |
18 | #define CR_WXN (1 << 19) /* Write Permision Imply XN */ | 18 | #define CR_WXN (1 << 19) /* Write Permision Imply XN */ |
19 | #define CR_EE (1 << 25) /* Exception (Big) Endian */ | 19 | #define CR_EE (1 << 25) /* Exception (Big) Endian */ |
20 | 20 | ||
21 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
22 | 22 | ||
23 | u64 get_page_table_size(void); | 23 | u64 get_page_table_size(void); |
24 | #define PGTABLE_SIZE get_page_table_size() | 24 | #define PGTABLE_SIZE get_page_table_size() |
25 | 25 | ||
26 | /* 2MB granularity */ | 26 | /* 2MB granularity */ |
27 | #define MMU_SECTION_SHIFT 21 | 27 | #define MMU_SECTION_SHIFT 21 |
28 | #define MMU_SECTION_SIZE (1 << MMU_SECTION_SHIFT) | 28 | #define MMU_SECTION_SIZE (1 << MMU_SECTION_SHIFT) |
29 | 29 | ||
30 | /* These constants need to be synced to the MT_ types in asm/armv8/mmu.h */ | 30 | /* These constants need to be synced to the MT_ types in asm/armv8/mmu.h */ |
enum dcache_option {
	/*
	 * Values are attribute-index fields (bits [4:2] of a block
	 * descriptor); they must stay in sync with the MT_ types in
	 * asm/armv8/mmu.h (see comment above).
	 */
	DCACHE_OFF = 0 << 2,		/* attr index 0: device/uncached */
	DCACHE_WRITETHROUGH = 3 << 2,	/* attr index 3 */
	DCACHE_WRITEBACK = 4 << 2,	/* attr index 4 */
	DCACHE_WRITEALLOC = 4 << 2,	/* same index as writeback here */
};
37 | 37 | ||
38 | #define wfi() \ | 38 | #define wfi() \ |
39 | ({asm volatile( \ | 39 | ({asm volatile( \ |
40 | "wfi" : : : "memory"); \ | 40 | "wfi" : : : "memory"); \ |
41 | }) | 41 | }) |
42 | 42 | ||
/* Return the current exception level (0..3) from the CurrentEL register */
static inline unsigned int current_el(void)
{
	unsigned int el;
	/* CurrentEL holds the EL in bits [3:2]; shift it down */
	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
	return el >> 2;
}
49 | 49 | ||
/* Read the System Control Register (SCTLR) of the current exception level */
static inline unsigned int get_sctlr(void)
{
	unsigned int el, val;

	/* SCTLR is banked per EL, so pick the register matching current EL */
	el = current_el();
	if (el == 1)
		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
	else if (el == 2)
		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
	else
		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");

	return val;
}
64 | 64 | ||
/* Write the System Control Register (SCTLR) of the current exception level */
static inline void set_sctlr(unsigned int val)
{
	unsigned int el;

	/* SCTLR is banked per EL, so pick the register matching current EL */
	el = current_el();
	if (el == 1)
		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
	else if (el == 2)
		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
	else
		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");

	/* Ensure the system register write takes effect before continuing */
	asm volatile("isb");
}
79 | 79 | ||
/* Read MPIDR_EL1 (Multiprocessor Affinity Register, identifies this core) */
static inline unsigned long read_mpidr(void)
{
	unsigned long val;

	asm volatile("mrs %0, mpidr_el1" : "=r" (val));

	return val;
}
88 | 88 | ||
89 | #define BSP_COREID 0 | 89 | #define BSP_COREID 0 |
90 | 90 | ||
91 | void __asm_flush_dcache_all(void); | 91 | void __asm_flush_dcache_all(void); |
92 | void __asm_invalidate_dcache_all(void); | 92 | void __asm_invalidate_dcache_all(void); |
93 | void __asm_flush_dcache_range(u64 start, u64 end); | 93 | void __asm_flush_dcache_range(u64 start, u64 end); |
94 | void __asm_invalidate_tlb_all(void); | 94 | void __asm_invalidate_tlb_all(void); |
95 | void __asm_invalidate_icache_all(void); | 95 | void __asm_invalidate_icache_all(void); |
96 | int __asm_invalidate_l3_dcache(void); | 96 | int __asm_invalidate_l3_dcache(void); |
97 | int __asm_flush_l3_dcache(void); | 97 | int __asm_flush_l3_dcache(void); |
98 | int __asm_invalidate_l3_icache(void); | 98 | int __asm_invalidate_l3_icache(void); |
99 | void __asm_switch_ttbr(u64 new_ttbr); | 99 | void __asm_switch_ttbr(u64 new_ttbr); |
100 | 100 | ||
101 | void armv8_switch_to_el2(void); | 101 | void armv8_switch_to_el2(void); |
102 | void armv8_switch_to_el1(void); | 102 | void armv8_switch_to_el1(void); |
103 | void gic_init(void); | 103 | void gic_init(void); |
104 | void gic_send_sgi(unsigned long sgino); | 104 | void gic_send_sgi(unsigned long sgino); |
105 | void wait_for_wakeup(void); | 105 | void wait_for_wakeup(void); |
106 | void protect_secure_region(void); | 106 | void protect_secure_region(void); |
107 | void smp_kick_all_cpus(void); | 107 | void smp_kick_all_cpus(void); |
108 | 108 | ||
109 | void flush_l3_cache(void); | 109 | void flush_l3_cache(void); |
110 | 110 | ||
111 | /* | 111 | /* |
112 | *Issue a secure monitor call in accordance with ARM "SMC Calling convention", | 112 | *Issue a secure monitor call in accordance with ARM "SMC Calling convention", |
113 | * DEN0028A | 113 | * DEN0028A |
114 | * | 114 | * |
115 | * @args: input and output arguments | 115 | * @args: input and output arguments |
116 | * | 116 | * |
117 | */ | 117 | */ |
118 | void smc_call(struct pt_regs *args); | 118 | void smc_call(struct pt_regs *args); |
119 | 119 | ||
120 | void __noreturn psci_system_reset(void); | 120 | void __noreturn psci_system_reset(void); |
121 | void __noreturn psci_system_off(void); | 121 | void __noreturn psci_system_off(void); |
122 | 122 | ||
123 | #endif /* __ASSEMBLY__ */ | 123 | #endif /* __ASSEMBLY__ */ |
124 | 124 | ||
125 | #else /* CONFIG_ARM64 */ | 125 | #else /* CONFIG_ARM64 */ |
126 | 126 | ||
127 | #ifdef __KERNEL__ | 127 | #ifdef __KERNEL__ |
128 | 128 | ||
129 | #define CPU_ARCH_UNKNOWN 0 | 129 | #define CPU_ARCH_UNKNOWN 0 |
130 | #define CPU_ARCH_ARMv3 1 | 130 | #define CPU_ARCH_ARMv3 1 |
131 | #define CPU_ARCH_ARMv4 2 | 131 | #define CPU_ARCH_ARMv4 2 |
132 | #define CPU_ARCH_ARMv4T 3 | 132 | #define CPU_ARCH_ARMv4T 3 |
133 | #define CPU_ARCH_ARMv5 4 | 133 | #define CPU_ARCH_ARMv5 4 |
134 | #define CPU_ARCH_ARMv5T 5 | 134 | #define CPU_ARCH_ARMv5T 5 |
135 | #define CPU_ARCH_ARMv5TE 6 | 135 | #define CPU_ARCH_ARMv5TE 6 |
136 | #define CPU_ARCH_ARMv5TEJ 7 | 136 | #define CPU_ARCH_ARMv5TEJ 7 |
137 | #define CPU_ARCH_ARMv6 8 | 137 | #define CPU_ARCH_ARMv6 8 |
138 | #define CPU_ARCH_ARMv7 9 | 138 | #define CPU_ARCH_ARMv7 9 |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * CR1 bits (CP#15 CR1) | 141 | * CR1 bits (CP#15 CR1) |
142 | */ | 142 | */ |
143 | #define CR_M (1 << 0) /* MMU enable */ | 143 | #define CR_M (1 << 0) /* MMU enable */ |
144 | #define CR_A (1 << 1) /* Alignment abort enable */ | 144 | #define CR_A (1 << 1) /* Alignment abort enable */ |
145 | #define CR_C (1 << 2) /* Dcache enable */ | 145 | #define CR_C (1 << 2) /* Dcache enable */ |
146 | #define CR_W (1 << 3) /* Write buffer enable */ | 146 | #define CR_W (1 << 3) /* Write buffer enable */ |
147 | #define CR_P (1 << 4) /* 32-bit exception handler */ | 147 | #define CR_P (1 << 4) /* 32-bit exception handler */ |
148 | #define CR_D (1 << 5) /* 32-bit data address range */ | 148 | #define CR_D (1 << 5) /* 32-bit data address range */ |
149 | #define CR_L (1 << 6) /* Implementation defined */ | 149 | #define CR_L (1 << 6) /* Implementation defined */ |
150 | #define CR_B (1 << 7) /* Big endian */ | 150 | #define CR_B (1 << 7) /* Big endian */ |
151 | #define CR_S (1 << 8) /* System MMU protection */ | 151 | #define CR_S (1 << 8) /* System MMU protection */ |
152 | #define CR_R (1 << 9) /* ROM MMU protection */ | 152 | #define CR_R (1 << 9) /* ROM MMU protection */ |
153 | #define CR_F (1 << 10) /* Implementation defined */ | 153 | #define CR_F (1 << 10) /* Implementation defined */ |
154 | #define CR_Z (1 << 11) /* Implementation defined */ | 154 | #define CR_Z (1 << 11) /* Implementation defined */ |
155 | #define CR_I (1 << 12) /* Icache enable */ | 155 | #define CR_I (1 << 12) /* Icache enable */ |
156 | #define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ | 156 | #define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ |
157 | #define CR_RR (1 << 14) /* Round Robin cache replacement */ | 157 | #define CR_RR (1 << 14) /* Round Robin cache replacement */ |
158 | #define CR_L4 (1 << 15) /* LDR pc can set T bit */ | 158 | #define CR_L4 (1 << 15) /* LDR pc can set T bit */ |
159 | #define CR_DT (1 << 16) | 159 | #define CR_DT (1 << 16) |
160 | #define CR_IT (1 << 18) | 160 | #define CR_IT (1 << 18) |
161 | #define CR_ST (1 << 19) | 161 | #define CR_ST (1 << 19) |
162 | #define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ | 162 | #define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ |
163 | #define CR_U (1 << 22) /* Unaligned access operation */ | 163 | #define CR_U (1 << 22) /* Unaligned access operation */ |
164 | #define CR_XP (1 << 23) /* Extended page tables */ | 164 | #define CR_XP (1 << 23) /* Extended page tables */ |
165 | #define CR_VE (1 << 24) /* Vectored interrupts */ | 165 | #define CR_VE (1 << 24) /* Vectored interrupts */ |
166 | #define CR_EE (1 << 25) /* Exception (Big) Endian */ | 166 | #define CR_EE (1 << 25) /* Exception (Big) Endian */ |
167 | #define CR_TRE (1 << 28) /* TEX remap enable */ | 167 | #define CR_TRE (1 << 28) /* TEX remap enable */ |
168 | #define CR_AFE (1 << 29) /* Access flag enable */ | 168 | #define CR_AFE (1 << 29) /* Access flag enable */ |
169 | #define CR_TE (1 << 30) /* Thumb exception enable */ | 169 | #define CR_TE (1 << 30) /* Thumb exception enable */ |
170 | 170 | ||
171 | #if defined(CONFIG_ARMV7_LPAE) && !defined(PGTABLE_SIZE) | 171 | #if defined(CONFIG_ARMV7_LPAE) && !defined(PGTABLE_SIZE) |
172 | #define PGTABLE_SIZE (4096 * 5) | 172 | #define PGTABLE_SIZE (4096 * 5) |
173 | #elif !defined(PGTABLE_SIZE) | 173 | #elif !defined(PGTABLE_SIZE) |
174 | #define PGTABLE_SIZE (4096 * 4) | 174 | #define PGTABLE_SIZE (4096 * 4) |
175 | #endif | 175 | #endif |
176 | 176 | ||
177 | /* | 177 | /* |
178 | * This is used to ensure the compiler did actually allocate the register we | 178 | * This is used to ensure the compiler did actually allocate the register we |
179 | * asked it for some inline assembly sequences. Apparently we can't trust | 179 | * asked it for some inline assembly sequences. Apparently we can't trust |
180 | * the compiler from one version to another so a bit of paranoia won't hurt. | 180 | * the compiler from one version to another so a bit of paranoia won't hurt. |
181 | * This string is meant to be concatenated with the inline asm string and | 181 | * This string is meant to be concatenated with the inline asm string and |
182 | * will cause compilation to stop on mismatch. | 182 | * will cause compilation to stop on mismatch. |
183 | * (for details, see gcc PR 15089) | 183 | * (for details, see gcc PR 15089) |
184 | */ | 184 | */ |
185 | #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" | 185 | #define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" |
186 | 186 | ||
187 | #ifndef __ASSEMBLY__ | 187 | #ifndef __ASSEMBLY__ |
188 | 188 | ||
189 | /** | 189 | /** |
190 | * save_boot_params() - Save boot parameters before starting reset sequence | 190 | * save_boot_params() - Save boot parameters before starting reset sequence |
191 | * | 191 | * |
192 | * If you provide this function it will be called immediately U-Boot starts, | 192 | * If you provide this function it will be called immediately U-Boot starts, |
193 | * both for SPL and U-Boot proper. | 193 | * both for SPL and U-Boot proper. |
194 | * | 194 | * |
195 | * All registers are unchanged from U-Boot entry. No registers need be | 195 | * All registers are unchanged from U-Boot entry. No registers need be |
196 | * preserved. | 196 | * preserved. |
197 | * | 197 | * |
198 | * This is not a normal C function. There is no stack. Return by branching to | 198 | * This is not a normal C function. There is no stack. Return by branching to |
199 | * save_boot_params_ret. | 199 | * save_boot_params_ret. |
200 | * | 200 | * |
201 | * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3); | 201 | * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3); |
202 | */ | 202 | */ |
203 | 203 | ||
204 | /** | 204 | /** |
205 | * save_boot_params_ret() - Return from save_boot_params() | 205 | * save_boot_params_ret() - Return from save_boot_params() |
206 | * | 206 | * |
207 | * If you provide save_boot_params(), then you should jump back to this | 207 | * If you provide save_boot_params(), then you should jump back to this |
208 | * function when done. Try to preserve all registers. | 208 | * function when done. Try to preserve all registers. |
209 | * | 209 | * |
210 | * If your implementation of save_boot_params() is in C then it is acceptable | 210 | * If your implementation of save_boot_params() is in C then it is acceptable |
211 | * to simply call save_boot_params_ret() at the end of your function. Since | 211 | * to simply call save_boot_params_ret() at the end of your function. Since |
212 | * there is no link register set up, you cannot just exit the function. U-Boot | 212 | * there is no link register set up, you cannot just exit the function. U-Boot |
213 | * will return to the (initialised) value of lr, and likely crash/hang. | 213 | * will return to the (initialised) value of lr, and likely crash/hang. |
214 | * | 214 | * |
215 | * If your implementation of save_boot_params() is in assembler then you | 215 | * If your implementation of save_boot_params() is in assembler then you |
216 | * should use 'b' or 'bx' to return to save_boot_params_ret. | 216 | * should use 'b' or 'bx' to return to save_boot_params_ret. |
217 | */ | 217 | */ |
218 | void save_boot_params_ret(void); | 218 | void save_boot_params_ret(void); |
219 | 219 | ||
220 | #ifdef CONFIG_ARMV7_LPAE | 220 | #ifdef CONFIG_ARMV7_LPAE |
221 | void switch_to_hypervisor_ret(void); | 221 | void switch_to_hypervisor_ret(void); |
222 | #endif | 222 | #endif |
223 | 223 | ||
224 | #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); | 224 | #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); |
225 | 225 | ||
226 | #ifdef __ARM_ARCH_7A__ | 226 | #ifdef __ARM_ARCH_7A__ |
227 | #define wfi() __asm__ __volatile__ ("wfi" : : : "memory") | 227 | #define wfi() __asm__ __volatile__ ("wfi" : : : "memory") |
228 | #else | 228 | #else |
229 | #define wfi() | 229 | #define wfi() |
230 | #endif | 230 | #endif |
231 | 231 | ||
/* Read the Current Program Status Register (CPSR) */
static inline unsigned long get_cpsr(void)
{
	unsigned long cpsr;

	asm volatile("mrs %0, cpsr" : "=r"(cpsr): );
	return cpsr;
}
239 | 239 | ||
/*
 * is_hyp() - check whether the CPU is currently in HYP mode
 *
 * Return: 1 if CPSR mode bits indicate HYP (0x1a), 0 otherwise.
 * Without LPAE support HYP cannot be entered, so this is a constant 0
 * and all HYP-only code can be optimized away.
 */
static inline int is_hyp(void)
{
#ifdef CONFIG_ARMV7_LPAE
	/* HYP mode requires LPAE ... */
	unsigned long mode = get_cpsr() & 0x1f;

	return mode == 0x1a;
#else
	/* ... so without LPAE support we can optimize all hyp code away */
	return 0;
#endif
}
250 | 250 | ||
/*
 * Read the control register (CP15 c1): HSCTLR (opc1=4) when in HYP mode,
 * otherwise SCTLR (opc1=0).
 */
static inline unsigned int get_cr(void)
{
	unsigned int val;

	if (is_hyp())
		asm volatile("mrc p15, 4, %0, c1, c0, 0	@ get CR" : "=r" (val)
				  :
				  : "cc");
	else
		asm volatile("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val)
				  :
				  : "cc");
	return val;
}
265 | 265 | ||
/*
 * Write the control register (CP15 c1): HSCTLR (opc1=4) when in HYP mode,
 * otherwise SCTLR (opc1=0). An isb ensures the change takes effect.
 */
static inline void set_cr(unsigned int val)
{
	if (is_hyp())
		asm volatile("mcr p15, 4, %0, c1, c0, 0	@ set CR" :
				  : "r" (val)
				  : "cc");
	else
		asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR" :
				  : "r" (val)
				  : "cc");
	isb();
}
278 | 278 | ||
/* Read the Domain Access Control Register (CP15 c3) */
static inline unsigned int get_dacr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c3, c0, 0	@ get DACR" : "=r" (val) : : "cc");
	return val;
}
285 | 285 | ||
/* Write the Domain Access Control Register (CP15 c3), then isb */
static inline void set_dacr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c3, c0, 0	@ set DACR"
	  : : "r" (val) : "cc");
	isb();
}
292 | 292 | ||
293 | #ifdef CONFIG_ARMV7_LPAE | 293 | #ifdef CONFIG_ARMV7_LPAE |
294 | /* Long-Descriptor Translation Table Level 1/2 Bits */ | 294 | /* Long-Descriptor Translation Table Level 1/2 Bits */ |
295 | #define TTB_SECT_XN_MASK (1ULL << 54) | 295 | #define TTB_SECT_XN_MASK (1ULL << 54) |
296 | #define TTB_SECT_NG_MASK (1 << 11) | 296 | #define TTB_SECT_NG_MASK (1 << 11) |
297 | #define TTB_SECT_AF (1 << 10) | 297 | #define TTB_SECT_AF (1 << 10) |
298 | #define TTB_SECT_SH_MASK (3 << 8) | 298 | #define TTB_SECT_SH_MASK (3 << 8) |
299 | #define TTB_SECT_NS_MASK (1 << 5) | 299 | #define TTB_SECT_NS_MASK (1 << 5) |
300 | #define TTB_SECT_AP (1 << 6) | 300 | #define TTB_SECT_AP (1 << 6) |
301 | /* Note: TTB AP bits are set elsewhere */ | 301 | /* Note: TTB AP bits are set elsewhere */ |
302 | #define TTB_SECT_MAIR(x) ((x & 0x7) << 2) /* Index into MAIR */ | 302 | #define TTB_SECT_MAIR(x) ((x & 0x7) << 2) /* Index into MAIR */ |
303 | #define TTB_SECT (1 << 0) | 303 | #define TTB_SECT (1 << 0) |
304 | #define TTB_PAGETABLE (3 << 0) | 304 | #define TTB_PAGETABLE (3 << 0) |
305 | 305 | ||
306 | /* TTBCR flags */ | 306 | /* TTBCR flags */ |
307 | #define TTBCR_EAE (1 << 31) | 307 | #define TTBCR_EAE (1 << 31) |
308 | #define TTBCR_T0SZ(x) ((x) << 0) | 308 | #define TTBCR_T0SZ(x) ((x) << 0) |
309 | #define TTBCR_T1SZ(x) ((x) << 16) | 309 | #define TTBCR_T1SZ(x) ((x) << 16) |
310 | #define TTBCR_USING_TTBR0 (TTBCR_T0SZ(0) | TTBCR_T1SZ(0)) | 310 | #define TTBCR_USING_TTBR0 (TTBCR_T0SZ(0) | TTBCR_T1SZ(0)) |
311 | #define TTBCR_IRGN0_NC (0 << 8) | 311 | #define TTBCR_IRGN0_NC (0 << 8) |
312 | #define TTBCR_IRGN0_WBWA (1 << 8) | 312 | #define TTBCR_IRGN0_WBWA (1 << 8) |
313 | #define TTBCR_IRGN0_WT (2 << 8) | 313 | #define TTBCR_IRGN0_WT (2 << 8) |
314 | #define TTBCR_IRGN0_WBNWA (3 << 8) | 314 | #define TTBCR_IRGN0_WBNWA (3 << 8) |
315 | #define TTBCR_IRGN0_MASK (3 << 8) | 315 | #define TTBCR_IRGN0_MASK (3 << 8) |
316 | #define TTBCR_ORGN0_NC (0 << 10) | 316 | #define TTBCR_ORGN0_NC (0 << 10) |
317 | #define TTBCR_ORGN0_WBWA (1 << 10) | 317 | #define TTBCR_ORGN0_WBWA (1 << 10) |
318 | #define TTBCR_ORGN0_WT (2 << 10) | 318 | #define TTBCR_ORGN0_WT (2 << 10) |
319 | #define TTBCR_ORGN0_WBNWA (3 << 10) | 319 | #define TTBCR_ORGN0_WBNWA (3 << 10) |
320 | #define TTBCR_ORGN0_MASK (3 << 10) | 320 | #define TTBCR_ORGN0_MASK (3 << 10) |
321 | #define TTBCR_SHARED_NON (0 << 12) | 321 | #define TTBCR_SHARED_NON (0 << 12) |
322 | #define TTBCR_SHARED_OUTER (2 << 12) | 322 | #define TTBCR_SHARED_OUTER (2 << 12) |
323 | #define TTBCR_SHARED_INNER (3 << 12) | 323 | #define TTBCR_SHARED_INNER (3 << 12) |
324 | #define TTBCR_EPD0 (0 << 7) | 324 | #define TTBCR_EPD0 (0 << 7) |
325 | 325 | ||
326 | /* | 326 | /* |
327 | * Memory types | 327 | * Memory types |
328 | */ | 328 | */ |
329 | #define MEMORY_ATTRIBUTES ((0x00 << (0 * 8)) | (0x88 << (1 * 8)) | \ | 329 | #define MEMORY_ATTRIBUTES ((0x00 << (0 * 8)) | (0x88 << (1 * 8)) | \ |
330 | (0xcc << (2 * 8)) | (0xff << (3 * 8))) | 330 | (0xcc << (2 * 8)) | (0xff << (3 * 8))) |
331 | 331 | ||
332 | /* options available for data cache on each page */ | 332 | /* options available for data cache on each page */ |
enum dcache_option {
	/*
	 * DCACHE_OFF carries TTB_SECT_XN_MASK so uncached regions are
	 * execute-never by default; otherwise the LPAE descriptor leaves
	 * them executable and speculative instruction fetches can hit
	 * arbitrary (e.g. device) memory.
	 */
	DCACHE_OFF = TTB_SECT | TTB_SECT_MAIR(0) | TTB_SECT_XN_MASK,
	DCACHE_WRITETHROUGH = TTB_SECT | TTB_SECT_MAIR(1),	/* MAIR idx 1 */
	DCACHE_WRITEBACK = TTB_SECT | TTB_SECT_MAIR(2),		/* MAIR idx 2 */
	DCACHE_WRITEALLOC = TTB_SECT | TTB_SECT_MAIR(3),	/* MAIR idx 3 */
};
339 | #elif defined(CONFIG_CPU_V7) | 339 | #elif defined(CONFIG_CPU_V7) |
340 | /* Short-Descriptor Translation Table Level 1 Bits */ | 340 | /* Short-Descriptor Translation Table Level 1 Bits */ |
341 | #define TTB_SECT_NS_MASK (1 << 19) | 341 | #define TTB_SECT_NS_MASK (1 << 19) |
342 | #define TTB_SECT_NG_MASK (1 << 17) | 342 | #define TTB_SECT_NG_MASK (1 << 17) |
343 | #define TTB_SECT_S_MASK (1 << 16) | 343 | #define TTB_SECT_S_MASK (1 << 16) |
344 | /* Note: TTB AP bits are set elsewhere */ | 344 | /* Note: TTB AP bits are set elsewhere */ |
345 | #define TTB_SECT_AP (3 << 10) | 345 | #define TTB_SECT_AP (3 << 10) |
346 | #define TTB_SECT_TEX(x) ((x & 0x7) << 12) | 346 | #define TTB_SECT_TEX(x) ((x & 0x7) << 12) |
347 | #define TTB_SECT_DOMAIN(x) ((x & 0xf) << 5) | 347 | #define TTB_SECT_DOMAIN(x) ((x & 0xf) << 5) |
348 | #define TTB_SECT_XN_MASK (1 << 4) | 348 | #define TTB_SECT_XN_MASK (1 << 4) |
349 | #define TTB_SECT_C_MASK (1 << 3) | 349 | #define TTB_SECT_C_MASK (1 << 3) |
350 | #define TTB_SECT_B_MASK (1 << 2) | 350 | #define TTB_SECT_B_MASK (1 << 2) |
351 | #define TTB_SECT (2 << 0) | 351 | #define TTB_SECT (2 << 0) |
352 | 352 | ||
353 | /* options available for data cache on each page */ | 353 | /* options available for data cache on each page */ |
enum dcache_option {
	/* Base: section entry, domain 0, execute-never, caches off */
	DCACHE_OFF = TTB_SECT_DOMAIN(0) | TTB_SECT_XN_MASK | TTB_SECT,
	/* Each option builds on the previous by adding C/B/TEX bits */
	DCACHE_WRITETHROUGH = DCACHE_OFF | TTB_SECT_C_MASK,
	DCACHE_WRITEBACK = DCACHE_WRITETHROUGH | TTB_SECT_B_MASK,
	DCACHE_WRITEALLOC = DCACHE_WRITEBACK | TTB_SECT_TEX(1),
};
360 | #else | 360 | #else |
361 | #define TTB_SECT_AP (3 << 10) | 361 | #define TTB_SECT_AP (3 << 10) |
362 | /* options available for data cache on each page */ | 362 | /* options available for data cache on each page */ |
enum dcache_option {
	/*
	 * Raw first-level section descriptor values for pre-v7 cores;
	 * presumably low bits encode section type and C/B cacheability
	 * flags of the legacy short-descriptor format — confirm against
	 * the relevant ARM ARM before touching.
	 */
	DCACHE_OFF = 0x12,
	DCACHE_WRITETHROUGH = 0x1a,
	DCACHE_WRITEBACK = 0x1e,
	DCACHE_WRITEALLOC = 0x16,
};
369 | #endif | 369 | #endif |
370 | 370 | ||
371 | /* Size of an MMU section */ | 371 | /* Size of an MMU section */ |
372 | enum { | 372 | enum { |
373 | #ifdef CONFIG_ARMV7_LPAE | 373 | #ifdef CONFIG_ARMV7_LPAE |
374 | MMU_SECTION_SHIFT = 21, /* 2MB */ | 374 | MMU_SECTION_SHIFT = 21, /* 2MB */ |
375 | #else | 375 | #else |
376 | MMU_SECTION_SHIFT = 20, /* 1MB */ | 376 | MMU_SECTION_SHIFT = 20, /* 1MB */ |
377 | #endif | 377 | #endif |
378 | MMU_SECTION_SIZE = 1 << MMU_SECTION_SHIFT, | 378 | MMU_SECTION_SIZE = 1 << MMU_SECTION_SHIFT, |
379 | }; | 379 | }; |
380 | 380 | ||
381 | #ifdef CONFIG_CPU_V7 | 381 | #ifdef CONFIG_CPU_V7 |
382 | /* TTBR0 bits */ | 382 | /* TTBR0 bits */ |
383 | #define TTBR0_BASE_ADDR_MASK 0xFFFFC000 | 383 | #define TTBR0_BASE_ADDR_MASK 0xFFFFC000 |
384 | #define TTBR0_RGN_NC (0 << 3) | 384 | #define TTBR0_RGN_NC (0 << 3) |
385 | #define TTBR0_RGN_WBWA (1 << 3) | 385 | #define TTBR0_RGN_WBWA (1 << 3) |
386 | #define TTBR0_RGN_WT (2 << 3) | 386 | #define TTBR0_RGN_WT (2 << 3) |
387 | #define TTBR0_RGN_WB (3 << 3) | 387 | #define TTBR0_RGN_WB (3 << 3) |
388 | /* TTBR0[6] is IRGN[0] and TTBR[0] is IRGN[1] */ | 388 | /* TTBR0[6] is IRGN[0] and TTBR[0] is IRGN[1] */ |
389 | #define TTBR0_IRGN_NC (0 << 0 | 0 << 6) | 389 | #define TTBR0_IRGN_NC (0 << 0 | 0 << 6) |
390 | #define TTBR0_IRGN_WBWA (0 << 0 | 1 << 6) | 390 | #define TTBR0_IRGN_WBWA (0 << 0 | 1 << 6) |
391 | #define TTBR0_IRGN_WT (1 << 0 | 0 << 6) | 391 | #define TTBR0_IRGN_WT (1 << 0 | 0 << 6) |
392 | #define TTBR0_IRGN_WB (1 << 0 | 1 << 6) | 392 | #define TTBR0_IRGN_WB (1 << 0 | 1 << 6) |
393 | #endif | 393 | #endif |
394 | 394 | ||
395 | /** | 395 | /** |
396 | * Register an update to the page tables, and flush the TLB | 396 | * Register an update to the page tables, and flush the TLB |
397 | * | 397 | * |
398 | * \param start start address of update in page table | 398 | * \param start start address of update in page table |
399 | * \param stop stop address of update in page table | 399 | * \param stop stop address of update in page table |
400 | */ | 400 | */ |
401 | void mmu_page_table_flush(unsigned long start, unsigned long stop); | 401 | void mmu_page_table_flush(unsigned long start, unsigned long stop); |
402 | 402 | ||
403 | #endif /* __ASSEMBLY__ */ | 403 | #endif /* __ASSEMBLY__ */ |
404 | 404 | ||
405 | #define arch_align_stack(x) (x) | 405 | #define arch_align_stack(x) (x) |
406 | 406 | ||
407 | #endif /* __KERNEL__ */ | 407 | #endif /* __KERNEL__ */ |
408 | 408 | ||
409 | #endif /* CONFIG_ARM64 */ | 409 | #endif /* CONFIG_ARM64 */ |
410 | 410 | ||
411 | #ifndef __ASSEMBLY__ | 411 | #ifndef __ASSEMBLY__ |
412 | /** | 412 | /** |
413 | * Change the cache settings for a region. | 413 | * Change the cache settings for a region. |
414 | * | 414 | * |
415 | * \param start start address of memory region to change | 415 | * \param start start address of memory region to change |
416 | * \param size size of memory region to change | 416 | * \param size size of memory region to change |
417 | * \param option dcache option to select | 417 | * \param option dcache option to select |
418 | */ | 418 | */ |
419 | void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size, | 419 | void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size, |
420 | enum dcache_option option); | 420 | enum dcache_option option); |
421 | 421 | ||
422 | #ifdef CONFIG_SYS_NONCACHED_MEMORY | 422 | #ifdef CONFIG_SYS_NONCACHED_MEMORY |
423 | void noncached_init(void); | 423 | void noncached_init(void); |
424 | phys_addr_t noncached_alloc(size_t size, size_t align); | 424 | phys_addr_t noncached_alloc(size_t size, size_t align); |
425 | #endif /* CONFIG_SYS_NONCACHED_MEMORY */ | 425 | #endif /* CONFIG_SYS_NONCACHED_MEMORY */ |
426 | 426 | ||
427 | #endif /* __ASSEMBLY__ */ | 427 | #endif /* __ASSEMBLY__ */ |
428 | 428 | ||
429 | #endif | 429 | #endif |
430 | 430 |
arch/arm/lib/cache-cp15.c
1 | /* | 1 | /* |
2 | * (C) Copyright 2002 | 2 | * (C) Copyright 2002 |
3 | * Wolfgang Denk, DENX Software Engineering, wd@denx.de. | 3 | * Wolfgang Denk, DENX Software Engineering, wd@denx.de. |
4 | * | 4 | * |
5 | * SPDX-License-Identifier: GPL-2.0+ | 5 | * SPDX-License-Identifier: GPL-2.0+ |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <common.h> | 8 | #include <common.h> |
9 | #include <asm/system.h> | 9 | #include <asm/system.h> |
10 | #include <asm/cache.h> | 10 | #include <asm/cache.h> |
11 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
12 | 12 | ||
13 | #if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF)) | 13 | #if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF)) |
14 | 14 | ||
15 | DECLARE_GLOBAL_DATA_PTR; | 15 | DECLARE_GLOBAL_DATA_PTR; |
16 | 16 | ||
/* Weak hook: boards may override to run setup before the MMU is enabled */
__weak void arm_init_before_mmu(void)
{
}
20 | 20 | ||
/* Weak hook: boards may override to configure MMU domains */
__weak void arm_init_domains(void)
{
}
24 | 24 | ||
25 | static void cp_delay (void) | 25 | static void cp_delay (void) |
26 | { | 26 | { |
27 | volatile int i; | 27 | volatile int i; |
28 | 28 | ||
29 | /* copro seems to need some delay between reading and writing */ | 29 | /* copro seems to need some delay between reading and writing */ |
30 | for (i = 0; i < 100; i++) | 30 | for (i = 0; i < 100; i++) |
31 | nop(); | 31 | nop(); |
32 | asm volatile("" : : : "memory"); | 32 | asm volatile("" : : : "memory"); |
33 | } | 33 | } |
34 | 34 | ||
/*
 * set_section_dcache() - write one section entry into the page table
 * @section:	section index (virtual address >> MMU_SECTION_SHIFT)
 * @option:	dcache_option descriptor bits to merge in
 *
 * The page table lives at gd->arch.tlb_addr; entries are 64-bit for
 * LPAE long descriptors, 32-bit otherwise.
 */
void set_section_dcache(int section, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Need to set the access flag to not fault */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the page offset */
	value |= ((u32)section << MMU_SECTION_SHIFT);

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}
55 | 55 | ||
/*
 * Weak default for flushing a modified page-table range from the data
 * cache. CPUs that need the flush override this; the default only logs.
 */
__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}
60 | 60 | ||
61 | void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size, | 61 | void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size, |
62 | enum dcache_option option) | 62 | enum dcache_option option) |
63 | { | 63 | { |
64 | #ifdef CONFIG_ARMV7_LPAE | 64 | #ifdef CONFIG_ARMV7_LPAE |
65 | u64 *page_table = (u64 *)gd->arch.tlb_addr; | 65 | u64 *page_table = (u64 *)gd->arch.tlb_addr; |
66 | #else | 66 | #else |
67 | u32 *page_table = (u32 *)gd->arch.tlb_addr; | 67 | u32 *page_table = (u32 *)gd->arch.tlb_addr; |
68 | #endif | 68 | #endif |
69 | unsigned long startpt, stoppt; | 69 | unsigned long startpt, stoppt; |
70 | unsigned long upto, end; | 70 | unsigned long upto, end; |
71 | 71 | ||
72 | end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT; | 72 | end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT; |
73 | start = start >> MMU_SECTION_SHIFT; | 73 | start = start >> MMU_SECTION_SHIFT; |
74 | #ifdef CONFIG_ARMV7_LPAE | ||
75 | debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size, | ||
76 | option); | ||
77 | #else | ||
74 | debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size, | 78 | debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size, |
75 | option); | 79 | option); |
80 | #endif | ||
76 | for (upto = start; upto < end; upto++) | 81 | for (upto = start; upto < end; upto++) |
77 | set_section_dcache(upto, option); | 82 | set_section_dcache(upto, option); |
78 | 83 | ||
79 | /* | 84 | /* |
80 | * Make sure range is cache line aligned | 85 | * Make sure range is cache line aligned |
81 | * Only CPU maintains page tables, hence it is safe to always | 86 | * Only CPU maintains page tables, hence it is safe to always |
82 | * flush complete cache lines... | 87 | * flush complete cache lines... |
83 | */ | 88 | */ |
84 | 89 | ||
85 | startpt = (unsigned long)&page_table[start]; | 90 | startpt = (unsigned long)&page_table[start]; |
86 | startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1); | 91 | startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1); |
87 | stoppt = (unsigned long)&page_table[end]; | 92 | stoppt = (unsigned long)&page_table[end]; |
88 | stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE); | 93 | stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE); |
89 | mmu_page_table_flush(startpt, stoppt); | 94 | mmu_page_table_flush(startpt, stoppt); |
90 | } | 95 | } |
91 | 96 | ||
/*
 * Mark every MMU section covering DRAM bank 'bank' (per gd->bd->bi_dram)
 * cacheable, using the build-time selected write policy. Weak so boards
 * can override which regions become cacheable.
 */
__weak void dram_bank_mmu_setup(int bank)
{
	bd_t *bd = gd->bd;
	int i;

	debug("%s: bank: %d\n", __func__, bank);
	/* One set_section_dcache() call per section within the bank */
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++) {
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
		set_section_dcache(i, DCACHE_WRITETHROUGH);
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
		set_section_dcache(i, DCACHE_WRITEALLOC);
#else
		set_section_dcache(i, DCACHE_WRITEBACK);
#endif
	}
}
111 | 116 | ||
/* to activate the MMU we need to set up virtual memory: use 1M areas */
/*
 * Build a flat (identity) translation table and enable the MMU:
 *  1. map all 4GB with DCACHE_OFF attributes,
 *  2. re-mark DRAM banks cacheable via dram_bank_mmu_setup(),
 *  3. program the translation-table base and attribute registers
 *     (LPAE, plain ARMv7, or legacy path depending on config),
 *  4. open the domain access control register, then set CR_M.
 */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	/* Then make each DRAM bank cacheable */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

#ifdef CONFIG_ARMV7_LPAE
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}

	/* TTBCR: enable the long-descriptor (LPAE) format ... */
	reg = TTBCR_EAE;
	/* ... plus the configured table-walk cacheability */
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	/* In HYP mode the hypervisor copies of the registers are used */
	if (is_hyp()) {
		/* Set HCTR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR0 */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7)
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* and enable the mmu */
	reg = get_cr();	/* get control reg. */
	cp_delay();
	set_cr(reg | CR_M);
}
197 | 202 | ||
198 | static int mmu_enabled(void) | 203 | static int mmu_enabled(void) |
199 | { | 204 | { |
200 | return get_cr() & CR_M; | 205 | return get_cr() & CR_M; |
201 | } | 206 | } |
202 | 207 | ||
/* cache_bit must be either CR_I or CR_C */
/*
 * Turn on one cache-enable bit in the system control register.
 * For the D-cache (CR_C) the MMU is brought up first, since the data
 * cache has no effect without translation enabled.
 */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not active unless the mmu is enabled too */
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
	/* read-modify-write the control register, with a copro delay */
	reg = get_cr();	/* get control reg. */
	cp_delay();
	set_cr(reg | cache_bit);
}
215 | 220 | ||
/* cache_bit must be either CR_I or CR_C */
/*
 * Turn off one cache-enable bit in the system control register.
 * Disabling the D-cache also disables the MMU, and the D-cache is
 * flushed first so no dirty lines are lost.
 */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();
	cp_delay();

	if (cache_bit == CR_C) {
		/* if cache isn't enabled no need to disable */
		if ((reg & CR_C) != CR_C)
			return;
		/* if disabling data cache, disable mmu too */
		cache_bit |= CR_M;
	}
	/* re-read the control register before clearing the bits */
	reg = get_cr();
	cp_delay();
	if (cache_bit == (CR_C | CR_M))
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
237 | #endif | 242 | #endif |
238 | 243 | ||
239 | #ifdef CONFIG_SYS_ICACHE_OFF | 244 | #ifdef CONFIG_SYS_ICACHE_OFF |
/*
 * CONFIG_SYS_ICACHE_OFF: instruction-cache support is compiled out.
 * Provide no-op stubs so callers still link; status always reports off.
 */
void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;	/* always off */
}
254 | #else | 259 | #else |
255 | void icache_enable(void) | 260 | void icache_enable(void) |
256 | { | 261 | { |
257 | cache_enable(CR_I); | 262 | cache_enable(CR_I); |
258 | } | 263 | } |
259 | 264 | ||
260 | void icache_disable(void) | 265 | void icache_disable(void) |
261 | { | 266 | { |
262 | cache_disable(CR_I); | 267 | cache_disable(CR_I); |
263 | } | 268 | } |
264 | 269 | ||
265 | int icache_status(void) | 270 | int icache_status(void) |
266 | { | 271 | { |
267 | return (get_cr() & CR_I) != 0; | 272 | return (get_cr() & CR_I) != 0; |
268 | } | 273 | } |
269 | #endif | 274 | #endif |
270 | 275 | ||
271 | #ifdef CONFIG_SYS_DCACHE_OFF | 276 | #ifdef CONFIG_SYS_DCACHE_OFF |
/*
 * CONFIG_SYS_DCACHE_OFF: data-cache support is compiled out.
 * Provide no-op stubs so callers still link; status always reports off.
 */
void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;	/* always off */
}
286 | #else | 291 | #else |
287 | void dcache_enable(void) | 292 | void dcache_enable(void) |
288 | { | 293 | { |
289 | cache_enable(CR_C); | 294 | cache_enable(CR_C); |
290 | } | 295 | } |
291 | 296 | ||
292 | void dcache_disable(void) | 297 | void dcache_disable(void) |
293 | { | 298 | { |
294 | cache_disable(CR_C); | 299 | cache_disable(CR_C); |
295 | } | 300 | } |
296 | 301 | ||
297 | int dcache_status(void) | 302 | int dcache_status(void) |
298 | { | 303 | { |
299 | return (get_cr() & CR_C) != 0; | 304 | return (get_cr() & CR_C) != 0; |
300 | } | 305 | } |
301 | #endif | 306 | #endif |
302 | 307 |