Commit 3a592a1349ac3961b0f4f2db0a8d9f128225d897
1 parent: b8e599746c
Exists in v2017.01-smarct4x and in 27 other branches
Revert "armv8: Enable CPUECTLR.SMPEN for coherency"
Upon further review this breaks most other platforms as we need to check
what core we're running on before touching it at all.

This reverts commit d73718f3236c520a92efa401084c658e6cc067f3.

Signed-off-by: Tom Rini <trini@konsulko.com>
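For context on the reasoning above: CPUECTLR_EL1 (S3_1_c15_c2_1) is an IMPLEMENTATION DEFINED register that exists on Cortex-A53/A57/A72 but not on every ARMv8 core, so setting SMPEN unconditionally in the shared start.S can misbehave or trap on other parts. Below is a minimal sketch of the kind of guard the revert is asking for, checking the MIDR_EL1 part number before touching the register; the labels (set_smpen, skip_smpen) and the placement are illustrative assumptions, not code from U-Boot.

	/*
	 * Illustrative only: set CPUECTLR_EL1.SMPEN (bit 6) only on cores
	 * known to implement the register. Part numbers from MIDR_EL1[15:4]:
	 * Cortex-A53 = 0xD03, Cortex-A57 = 0xD07, Cortex-A72 = 0xD08.
	 */
	mrs	x0, midr_el1
	lsr	x0, x0, #4
	and	x0, x0, #0xfff			/* extract the part number */
	cmp	x0, #0xD03			/* Cortex-A53 */
	b.eq	set_smpen
	cmp	x0, #0xD07			/* Cortex-A57 */
	b.eq	set_smpen
	cmp	x0, #0xD08			/* Cortex-A72 */
	b.ne	skip_smpen
set_smpen:
	mrs	x0, S3_1_c15_c2_1		/* cpuectlr_el1 */
	orr	x0, x0, #0x40			/* SMPEN */
	msr	S3_1_c15_c2_1, x0
skip_smpen:

Whether such a check belongs in the common startup code or in platform-specific lowlevel_init is exactly the question the revert leaves open.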
Showing 1 changed file with 0 additions and 8 deletions
arch/arm/cpu/armv8/start.S
@@ -1,291 +1,283 @@
 /*
  * (C) Copyright 2013
  * David Feng <fenghua@phytium.com.cn>
  *
  * SPDX-License-Identifier: GPL-2.0+
  */

 #include <asm-offsets.h>
 #include <config.h>
 #include <linux/linkage.h>
 #include <asm/macro.h>
 #include <asm/armv8/mmu.h>

 /*************************************************************************
  *
  * Startup Code (reset vector)
  *
  *************************************************************************/

 .globl	_start
 _start:
 	b	reset

 #ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
 /*
  * Various SoCs need something special and SoC-specific up front in
  * order to boot, allow them to set that in their boot0.h file and then
  * use it here.
  */
 #include <asm/arch/boot0.h>
 ARM_SOC_BOOT0_HOOK
 #endif

 	.align 3

 .globl	_TEXT_BASE
 _TEXT_BASE:
 	.quad	CONFIG_SYS_TEXT_BASE

 /*
  * These are defined in the linker script.
  */
 .globl	_end_ofs
 _end_ofs:
 	.quad	_end - _start

 .globl	_bss_start_ofs
 _bss_start_ofs:
 	.quad	__bss_start - _start

 .globl	_bss_end_ofs
 _bss_end_ofs:
 	.quad	__bss_end - _start

 reset:
 #ifdef CONFIG_SYS_RESET_SCTRL
 	bl	reset_sctrl
 #endif
 /*
  * Could be EL3/EL2/EL1, Initial State:
  * Little Endian, MMU Disabled, i/dCache Disabled
  */
 	adr	x0, vectors
 	switch_el x1, 3f, 2f, 1f
 3:	msr	vbar_el3, x0
 	mrs	x0, scr_el3
 	orr	x0, x0, #0xf		/* SCR_EL3.NS|IRQ|FIQ|EA */
 	msr	scr_el3, x0
 	msr	cptr_el3, xzr		/* Enable FP/SIMD */
 #ifdef COUNTER_FREQUENCY
 	ldr	x0, =COUNTER_FREQUENCY
 	msr	cntfrq_el0, x0		/* Initialize CNTFRQ */
 #endif
 	b	0f
 2:	msr	vbar_el2, x0
 	mov	x0, #0x33ff
 	msr	cptr_el2, x0		/* Enable FP/SIMD */
 	b	0f
 1:	msr	vbar_el1, x0
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0		/* Enable FP/SIMD */
 0:

-	/* Enable SMPEN bit for coherency.
-	 * This register is not architectural but at the moment
-	 * this bit should be set for A53/A57/A72.
-	 */
-	mrs	x0, S3_1_c15_c2_1	/* cpuectlr_el1 */
-	orr	x0, x0, #0x40
-	msr	S3_1_c15_c2_1, x0
-
 	/* Apply ARM core specific errata */
 	bl	apply_core_errata

 	/*
 	 * Cache/BPB/TLB Invalidate
 	 * i-cache is invalidated before enabled in icache_enable()
 	 * tlb is invalidated before mmu is enabled in dcache_enable()
 	 * d-cache is invalidated before enabled in dcache_enable()
 	 */

 	/* Processor specific initialization */
 	bl	lowlevel_init

 #ifdef CONFIG_ARMV8_MULTIENTRY
 	branch_if_master x0, x1, master_cpu

 	/*
 	 * Slave CPUs
 	 */
 slave_cpu:
 	wfe
 	ldr	x1, =CPU_RELEASE_ADDR
 	ldr	x0, [x1]
 	cbz	x0, slave_cpu
 	br	x0			/* branch to the given address */
 master_cpu:
 	/* On the master CPU */
 #endif /* CONFIG_ARMV8_MULTIENTRY */

 	bl	_main

 #ifdef CONFIG_SYS_RESET_SCTRL
 reset_sctrl:
 	switch_el x1, 3f, 2f, 1f
 3:
 	mrs	x0, sctlr_el3
 	b	0f
 2:
 	mrs	x0, sctlr_el2
 	b	0f
 1:
 	mrs	x0, sctlr_el1

 0:
 	ldr	x1, =0xfdfffffa
 	and	x0, x0, x1

 	switch_el x1, 6f, 5f, 4f
 6:
 	msr	sctlr_el3, x0
 	b	7f
 5:
 	msr	sctlr_el2, x0
 	b	7f
 4:
 	msr	sctlr_el1, x0

 7:
 	dsb	sy
 	isb
 	b	__asm_invalidate_tlb_all
 	ret
 #endif

 /*-----------------------------------------------------------------------*/

 WEAK(apply_core_errata)

 	mov	x29, lr			/* Save LR */
 	/* For now, we support Cortex-A57 specific errata only */

 	/* Check if we are running on a Cortex-A57 core */
 	branch_if_a57_core x0, apply_a57_core_errata
 0:
 	mov	lr, x29			/* Restore LR */
 	ret

 apply_a57_core_errata:

 #ifdef CONFIG_ARM_ERRATA_828024
 	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
 	/* Disable non-allocate hint of w-b-n-a memory type */
 	orr	x0, x0, #1 << 49
 	/* Disable write streaming no L1-allocate threshold */
 	orr	x0, x0, #3 << 25
 	/* Disable write streaming no-allocate threshold */
 	orr	x0, x0, #3 << 27
 	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
 #endif

 #ifdef CONFIG_ARM_ERRATA_826974
 	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
 	/* Disable speculative load execution ahead of a DMB */
 	orr	x0, x0, #1 << 59
 	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
 #endif

 #ifdef CONFIG_ARM_ERRATA_833471
 	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
 	/* FPSCR write flush.
 	 * Note that in some cases where a flush is unnecessary this
 	 * could impact performance. */
 	orr	x0, x0, #1 << 38
 	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
 #endif

 #ifdef CONFIG_ARM_ERRATA_829520
 	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
 	/* Disable Indirect Predictor bit will prevent this erratum
 	 * from occurring
 	 * Note that in some cases where a flush is unnecessary this
 	 * could impact performance. */
 	orr	x0, x0, #1 << 4
 	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
 #endif

 #ifdef CONFIG_ARM_ERRATA_833069
 	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
 	/* Disable Enable Invalidates of BTB bit */
 	and	x0, x0, #0xE
 	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
 #endif
 	b	0b
 ENDPROC(apply_core_errata)

 /*-----------------------------------------------------------------------*/

 WEAK(lowlevel_init)
 	mov	x29, lr			/* Save LR */

 #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
 	branch_if_slave x0, 1f
 	ldr	x0, =GICD_BASE
 	bl	gic_init_secure
 1:
 #if defined(CONFIG_GICV3)
 	ldr	x0, =GICR_BASE
 	bl	gic_init_secure_percpu
 #elif defined(CONFIG_GICV2)
 	ldr	x0, =GICD_BASE
 	ldr	x1, =GICC_BASE
 	bl	gic_init_secure_percpu
 #endif
 #endif

 #ifdef CONFIG_ARMV8_MULTIENTRY
 	branch_if_master x0, x1, 2f

 	/*
 	 * Slaves should wait for the master to clear the spin table.
 	 * This sync prevents slaves from observing an incorrect
 	 * value in the spin table and jumping to the wrong place.
 	 */
 #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
 #ifdef CONFIG_GICV2
 	ldr	x0, =GICC_BASE
 #endif
 	bl	gic_wait_for_interrupt
 #endif

 	/*
 	 * All slaves will enter EL2 and optionally EL1.
 	 */
 	bl	armv8_switch_to_el2
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
 	bl	armv8_switch_to_el1
 #endif

 #endif /* CONFIG_ARMV8_MULTIENTRY */

 2:
 	mov	lr, x29			/* Restore LR */
 	ret
 ENDPROC(lowlevel_init)

 WEAK(smp_kick_all_cpus)
 	/* Kick secondary cpus up by SGI 0 interrupt */
 #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
 	ldr	x0, =GICD_BASE
 	b	gic_kick_secondary_cpus
 #endif
 	ret
 ENDPROC(smp_kick_all_cpus)

 /*-----------------------------------------------------------------------*/

 ENTRY(c_runtime_cpu_setup)
 	/* Relocate vBAR */
 	adr	x0, vectors
 	switch_el x1, 3f, 2f, 1f
 3:	msr	vbar_el3, x0
 	b	0f
 2:	msr	vbar_el2, x0
 	b	0f
 1:	msr	vbar_el1, x0
 0:

 	ret
 ENDPROC(c_runtime_cpu_setup)