Commit ec6617c39741adc6c54952564579e32c3c09c66f

Authored by Alison Wang
Committed by York Sun
1 parent 95e74a3df7

armv8: Support loading 32-bit OS in AArch32 execution state

To support loading a 32-bit OS, the execution state will change from
AArch64 to AArch32 when jumping to the kernel.

The architecture information is obtained by checking the FIT image, so
U-Boot can load a 32-bit or a 64-bit OS automatically.

Signed-off-by: Ebony Zhu <ebony.zhu@nxp.com>
Signed-off-by: Alison Wang <alison.wang@nxp.com>
Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
Reviewed-by: York Sun <york.sun@nxp.com>
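
In practice the choice of execution state reduces to the following, a simplified C sketch of the boot_jump_linux() change in arch/arm/lib/bootm.c further down (EL1 path and error handling omitted):

	/*
	 * Pick the execution state from the OS architecture recorded in the
	 * FIT image (images->os.arch), then pass the register arguments
	 * expected by the new armv8_switch_to_el2() prototype.
	 */
	if (IH_ARCH_DEFAULT == IH_ARCH_ARM64 &&
	    images->os.arch == IH_ARCH_ARM)
		/* 32-bit kernel: r1 = machine nr, r2 = fdt address */
		armv8_switch_to_el2(0, (u64)gd->bd->bi_arch_number,
				    (u64)images->ft_addr, (u64)images->ep,
				    ES_TO_AARCH32);
	else
		/* 64-bit kernel: x0 = fdt address */
		armv8_switch_to_el2((u64)images->ft_addr, 0, 0,
				    (u64)images->ep, ES_TO_AARCH64);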

Showing 11 changed files with 416 additions and 71 deletions

arch/arm/Kconfig
... ... @@ -126,6 +126,12 @@
126 126 ARM_SOC_BOOT0_HOOK which contains the required assembler
127 127 preprocessor code.
128 128  
  129 +config ARM64_SUPPORT_AARCH32
  130 + bool "ARM64 system support AArch32 execution state"
  131 + default y if ARM64 && !TARGET_THUNDERX_88XX
  132 + help
  133 + This ARM64 system supports AArch32 execution state.
  134 +
129 135 choice
130 136 prompt "Target select"
131 137 default TARGET_HIKEY
arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
... ... @@ -17,6 +17,7 @@
17 17 #include <asm/arch-fsl-layerscape/immap_lsch3.h>
18 18 #include <asm/arch-fsl-layerscape/soc.h>
19 19 #endif
  20 +#include <asm/u-boot.h>
20 21  
21 22 ENTRY(lowlevel_init)
22 23 mov x29, lr /* Save LR */
... ... @@ -359,11 +360,6 @@
359 360 gic_wait_for_interrupt_m x0, w1
360 361 #endif
361 362  
362   - bl secondary_switch_to_el2
363   -#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
364   - bl secondary_switch_to_el1
365   -#endif
366   -
367 363 slave_cpu:
368 364 wfe
369 365 ldr x0, [x11]
... ... @@ -376,19 +372,64 @@
376 372 tbz x1, #25, cpu_is_le
377 373 rev x0, x0 /* BE to LE conversion */
378 374 cpu_is_le:
379   - br x0 /* branch to the given address */
  375 + ldr x5, [x11, #24]
  376 + ldr x6, =IH_ARCH_DEFAULT
  377 + cmp x6, x5
  378 + b.eq 1f
  379 +
  380 +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
  381 + adr x3, secondary_switch_to_el1
  382 + ldr x4, =ES_TO_AARCH64
  383 +#else
  384 + ldr x3, [x11]
  385 + ldr x4, =ES_TO_AARCH32
  386 +#endif
  387 + bl secondary_switch_to_el2
  388 +
  389 +1:
  390 +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
  391 + adr x3, secondary_switch_to_el1
  392 +#else
  393 + ldr x3, [x11]
  394 +#endif
  395 + ldr x4, =ES_TO_AARCH64
  396 + bl secondary_switch_to_el2
  397 +
380 398 ENDPROC(secondary_boot_func)
381 399  
382 400 ENTRY(secondary_switch_to_el2)
383   - switch_el x0, 1f, 0f, 0f
  401 + switch_el x5, 1f, 0f, 0f
384 402 0: ret
385   -1: armv8_switch_to_el2_m x0
  403 +1: armv8_switch_to_el2_m x3, x4, x5
386 404 ENDPROC(secondary_switch_to_el2)
387 405  
388 406 ENTRY(secondary_switch_to_el1)
389   - switch_el x0, 0f, 1f, 0f
  407 + mrs x0, mpidr_el1
  408 + ubfm x1, x0, #8, #15
  409 + ubfm x2, x0, #0, #1
  410 + orr x10, x2, x1, lsl #2 /* x10 has LPID */
  411 +
  412 + lsl x1, x10, #6
  413 + ldr x0, =__spin_table
  414 + /* physical address of this cpu's spin table element */
  415 + add x11, x1, x0
  416 +
  417 + ldr x3, [x11]
  418 +
  419 + ldr x5, [x11, #24]
  420 + ldr x6, =IH_ARCH_DEFAULT
  421 + cmp x6, x5
  422 + b.eq 2f
  423 +
  424 + ldr x4, =ES_TO_AARCH32
  425 + bl switch_to_el1
  426 +
  427 +2: ldr x4, =ES_TO_AARCH64
  428 +
  429 +switch_to_el1:
  430 + switch_el x5, 0f, 1f, 0f
390 431 0: ret
391   -1: armv8_switch_to_el1_m x0, x1
  432 +1: armv8_switch_to_el1_m x3, x4, x5
392 433 ENDPROC(secondary_switch_to_el1)
393 434  
394 435 /* Ensure that the literals used by the secondary boot code are
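
The secondary boot path above reads two fields from each CPU's 64-byte spin-table element (the element size follows from "lsl x1, x10, #6"): the jump address at offset 0 ([x11]) and an architecture word at offset 24 ([x11, #24]) that is compared against IH_ARCH_DEFAULT. A rough C view of that layout; the names of the fields not touched here are illustrative, not taken from the source:

	#include <stdint.h>

	struct spin_table_elem {		/* one 64-byte slot per core */
		uint64_t entry_addr;		/* +0:  address the slave branches to */
		uint64_t unused0;		/* +8:  not read here; name illustrative */
		uint64_t unused1;		/* +16: not read here; name illustrative */
		uint64_t arch;			/* +24: IH_ARCH_* of the OS to enter */
		uint64_t pad[4];		/* pad the element to 64 bytes */
	};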
arch/arm/cpu/armv8/start.S
... ... @@ -251,9 +251,17 @@
251 251 /*
252 252 * All slaves will enter EL2 and optionally EL1.
253 253 */
  254 + adr x3, lowlevel_in_el2
  255 + ldr x4, =ES_TO_AARCH64
254 256 bl armv8_switch_to_el2
  257 +
  258 +lowlevel_in_el2:
255 259 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
  260 + adr x3, lowlevel_in_el1
  261 + ldr x4, =ES_TO_AARCH64
256 262 bl armv8_switch_to_el1
  263 +
  264 +lowlevel_in_el1:
257 265 #endif
258 266  
259 267 #endif /* CONFIG_ARMV8_MULTIENTRY */
arch/arm/cpu/armv8/transition.S
... ... @@ -11,14 +11,25 @@
11 11 #include <asm/macro.h>
12 12  
13 13 ENTRY(armv8_switch_to_el2)
14   - switch_el x0, 1f, 0f, 0f
15   -0: ret
16   -1: armv8_switch_to_el2_m x0
  14 + switch_el x5, 1f, 0f, 0f
  15 +0:
  16 + /*
  17 + * x3 is kernel entry point or switch_to_el1
  18 + * if CONFIG_ARMV8_SWITCH_TO_EL1 is defined.
  19 + * When running in EL2 now, jump to the
  20 + * address saved in x3.
  21 + */
  22 + br x3
  23 +1: armv8_switch_to_el2_m x3, x4, x5
17 24 ENDPROC(armv8_switch_to_el2)
18 25  
19 26 ENTRY(armv8_switch_to_el1)
20   - switch_el x0, 0f, 1f, 0f
21   -0: ret
22   -1: armv8_switch_to_el1_m x0, x1
  27 + switch_el x5, 0f, 1f, 0f
  28 +0:
  29 + /* x3 is kernel entry point. When running in EL1
  30 + * now, jump to the address saved in x3.
  31 + */
  32 + br x3
  33 +1: armv8_switch_to_el1_m x3, x4, x5
23 34 ENDPROC(armv8_switch_to_el1)
arch/arm/include/asm/arch-fsl-layerscape/mp.h
... ... @@ -36,5 +36,9 @@
36 36 int is_core_online(u64 cpu_id);
37 37 u32 cpu_pos_mask(void);
38 38 #endif
  39 +
  40 +#define IH_ARCH_ARM 2 /* ARM */
  41 +#define IH_ARCH_ARM64 22 /* ARM64 */
  42 +
39 43 #endif /* _FSL_LAYERSCAPE_MP_H */
arch/arm/include/asm/macro.h
... ... @@ -8,6 +8,11 @@
8 8  
9 9 #ifndef __ASM_ARM_MACRO_H__
10 10 #define __ASM_ARM_MACRO_H__
  11 +
  12 +#ifdef CONFIG_ARM64
  13 +#include <asm/system.h>
  14 +#endif
  15 +
11 16 #ifdef __ASSEMBLY__
12 17  
... ... @@ -135,13 +140,21 @@
135 140 #endif
136 141 .endm
137 142  
138   -.macro armv8_switch_to_el2_m, xreg1
139   - /* 64bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1 */
140   - mov \xreg1, #0x5b1
141   - msr scr_el3, \xreg1
  143 +/*
  144 + * Switch from EL3 to EL2 for ARMv8
  145 + * @ep: kernel entry point
  146 + * @flag: The execution state flag for lower exception
  147 + * level, ES_TO_AARCH64 or ES_TO_AARCH32
  148 + * @tmp: temporary register
  149 + *
  150 + * For loading 32-bit OS, x1 is machine nr and x2 is fdt address.
  151 + * For loading 64-bit OS, x0 is physical address to the FDT blob.
  152 + * They will be passed to the guest.
  153 + */
  154 +.macro armv8_switch_to_el2_m, ep, flag, tmp
142 155 msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */
143   - mov \xreg1, #0x33ff
144   - msr cptr_el2, \xreg1 /* Disable coprocessor traps to EL2 */
  156 + mov \tmp, #CPTR_EL2_RES1
  157 + msr cptr_el2, \tmp /* Disable coprocessor traps to EL2 */
145 158  
146 159 /* Initialize Generic Timers */
147 160 msr cntvoff_el2, xzr
... ... @@ -152,46 +165,91 @@
152 165 * and RES0 bits (31,30,27,26,24,21,20,17,15-13,10-6) +
153 166 * EE,WXN,I,SA,C,A,M to 0
154 167 */
155   - mov \xreg1, #0x0830
156   - movk \xreg1, #0x30C5, lsl #16
157   - msr sctlr_el2, \xreg1
  168 + ldr \tmp, =(SCTLR_EL2_RES1 | SCTLR_EL2_EE_LE |\
  169 + SCTLR_EL2_WXN_DIS | SCTLR_EL2_ICACHE_DIS |\
  170 + SCTLR_EL2_SA_DIS | SCTLR_EL2_DCACHE_DIS |\
  171 + SCTLR_EL2_ALIGN_DIS | SCTLR_EL2_MMU_DIS)
  172 + msr sctlr_el2, \tmp
158 173  
  174 + mov \tmp, sp
  175 + msr sp_el2, \tmp /* Migrate SP */
  176 + mrs \tmp, vbar_el3
  177 + msr vbar_el2, \tmp /* Migrate VBAR */
  178 +
  179 + /* Check switch to AArch64 EL2 or AArch32 Hypervisor mode */
  180 + cmp \flag, #ES_TO_AARCH32
  181 + b.eq 1f
  182 +
  183 + /*
  184 + * The next lower exception level is AArch64, 64bit EL2 | HCE |
  185 + * SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1.
  186 + */
  187 + ldr \tmp, =(SCR_EL3_RW_AARCH64 | SCR_EL3_HCE_EN |\
  188 + SCR_EL3_SMD_DIS | SCR_EL3_RES1 |\
  189 + SCR_EL3_NS_EN)
  190 + msr scr_el3, \tmp
  191 +
159 192 /* Return to the EL2_SP2 mode from EL3 */
160   - mov \xreg1, sp
161   - msr sp_el2, \xreg1 /* Migrate SP */
162   - mrs \xreg1, vbar_el3
163   - msr vbar_el2, \xreg1 /* Migrate VBAR */
164   - mov \xreg1, #0x3c9
165   - msr spsr_el3, \xreg1 /* EL2_SP2 | D | A | I | F */
166   - msr elr_el3, lr
  193 + ldr \tmp, =(SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK |\
  194 + SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\
  195 + SPSR_EL_M_AARCH64 | SPSR_EL_M_EL2H)
  196 + msr spsr_el3, \tmp
  197 + msr elr_el3, \ep
167 198 eret
  199 +
  200 +1:
  201 + /*
  202 + * The next lower exception level is AArch32, 32bit EL2 | HCE |
  203 + * SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1.
  204 + */
  205 + ldr \tmp, =(SCR_EL3_RW_AARCH32 | SCR_EL3_HCE_EN |\
  206 + SCR_EL3_SMD_DIS | SCR_EL3_RES1 |\
  207 + SCR_EL3_NS_EN)
  208 + msr scr_el3, \tmp
  209 +
  210 + /* Return to AArch32 Hypervisor mode */
  211 + ldr \tmp, =(SPSR_EL_END_LE | SPSR_EL_ASYN_MASK |\
  212 + SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\
  213 + SPSR_EL_T_A32 | SPSR_EL_M_AARCH32 |\
  214 + SPSR_EL_M_HYP)
  215 + msr spsr_el3, \tmp
  216 + msr elr_el3, \ep
  217 + eret
168 218 .endm
169 219  
170   -.macro armv8_switch_to_el1_m, xreg1, xreg2
  220 +/*
  221 + * Switch from EL2 to EL1 for ARMv8
  222 + * @ep: kernel entry point
  223 + * @flag: The execution state flag for lower exception
  224 + * level, ES_TO_AARCH64 or ES_TO_AARCH32
  225 + * @tmp: temporary register
  226 + *
  227 + * For loading 32-bit OS, x1 is machine nr and x2 is fdt address.
  228 + * For loading 64-bit OS, x0 is physical address to the FDT blob.
  229 + * They will be passed to the guest.
  230 + */
  231 +.macro armv8_switch_to_el1_m, ep, flag, tmp
171 232 /* Initialize Generic Timers */
172   - mrs \xreg1, cnthctl_el2
173   - orr \xreg1, \xreg1, #0x3 /* Enable EL1 access to timers */
174   - msr cnthctl_el2, \xreg1
  233 + mrs \tmp, cnthctl_el2
  234 + /* Enable EL1 access to timers */
  235 + orr \tmp, \tmp, #(CNTHCTL_EL2_EL1PCEN_EN |\
  236 + CNTHCTL_EL2_EL1PCTEN_EN)
  237 + msr cnthctl_el2, \tmp
175 238 msr cntvoff_el2, xzr
176 239  
177 240 /* Initialize MPID/MPIDR registers */
178   - mrs \xreg1, midr_el1
179   - mrs \xreg2, mpidr_el1
180   - msr vpidr_el2, \xreg1
181   - msr vmpidr_el2, \xreg2
  241 + mrs \tmp, midr_el1
  242 + msr vpidr_el2, \tmp
  243 + mrs \tmp, mpidr_el1
  244 + msr vmpidr_el2, \tmp
182 245  
183 246 /* Disable coprocessor traps */
184   - mov \xreg1, #0x33ff
185   - msr cptr_el2, \xreg1 /* Disable coprocessor traps to EL2 */
  247 + mov \tmp, #CPTR_EL2_RES1
  248 + msr cptr_el2, \tmp /* Disable coprocessor traps to EL2 */
186 249 msr hstr_el2, xzr /* Disable coprocessor traps to EL2 */
187   - mov \xreg1, #3 << 20
188   - msr cpacr_el1, \xreg1 /* Enable FP/SIMD at EL1 */
  250 + mov \tmp, #CPACR_EL1_FPEN_EN
  251 + msr cpacr_el1, \tmp /* Enable FP/SIMD at EL1 */
189 252  
190   - /* Initialize HCR_EL2 */
191   - mov \xreg1, #(1 << 31) /* 64bit EL1 */
192   - orr \xreg1, \xreg1, #(1 << 29) /* Disable HVC */
193   - msr hcr_el2, \xreg1
194   -
195 253 /* SCTLR_EL1 initialization
196 254 *
197 255 * setting RES1 bits (29,28,23,22,20,11) to 1
... ... @@ -199,18 +257,50 @@
199 257 * UCI,EE,EOE,WXN,nTWE,nTWI,UCT,DZE,I,UMA,SED,ITD,
200 258 * CP15BEN,SA0,SA,C,A,M to 0
201 259 */
202   - mov \xreg1, #0x0800
203   - movk \xreg1, #0x30d0, lsl #16
204   - msr sctlr_el1, \xreg1
  260 + ldr \tmp, =(SCTLR_EL1_RES1 | SCTLR_EL1_UCI_DIS |\
  261 + SCTLR_EL1_EE_LE | SCTLR_EL1_WXN_DIS |\
  262 + SCTLR_EL1_NTWE_DIS | SCTLR_EL1_NTWI_DIS |\
  263 + SCTLR_EL1_UCT_DIS | SCTLR_EL1_DZE_DIS |\
  264 + SCTLR_EL1_ICACHE_DIS | SCTLR_EL1_UMA_DIS |\
  265 + SCTLR_EL1_SED_EN | SCTLR_EL1_ITD_EN |\
  266 + SCTLR_EL1_CP15BEN_DIS | SCTLR_EL1_SA0_DIS |\
  267 + SCTLR_EL1_SA_DIS | SCTLR_EL1_DCACHE_DIS |\
  268 + SCTLR_EL1_ALIGN_DIS | SCTLR_EL1_MMU_DIS)
  269 + msr sctlr_el1, \tmp
205 270  
  271 + mov \tmp, sp
  272 + msr sp_el1, \tmp /* Migrate SP */
  273 + mrs \tmp, vbar_el2
  274 + msr vbar_el1, \tmp /* Migrate VBAR */
  275 +
  276 + /* Check switch to AArch64 EL1 or AArch32 Supervisor mode */
  277 + cmp \flag, #ES_TO_AARCH32
  278 + b.eq 1f
  279 +
  280 + /* Initialize HCR_EL2 */
  281 + ldr \tmp, =(HCR_EL2_RW_AARCH64 | HCR_EL2_HCD_DIS)
  282 + msr hcr_el2, \tmp
  283 +
206 284 /* Return to the EL1_SP1 mode from EL2 */
207   - mov \xreg1, sp
208   - msr sp_el1, \xreg1 /* Migrate SP */
209   - mrs \xreg1, vbar_el2
210   - msr vbar_el1, \xreg1 /* Migrate VBAR */
211   - mov \xreg1, #0x3c5
212   - msr spsr_el2, \xreg1 /* EL1_SP1 | D | A | I | F */
213   - msr elr_el2, lr
  285 + ldr \tmp, =(SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK |\
  286 + SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\
  287 + SPSR_EL_M_AARCH64 | SPSR_EL_M_EL1H)
  288 + msr spsr_el2, \tmp
  289 + msr elr_el2, \ep
  290 + eret
  291 +
  292 +1:
  293 + /* Initialize HCR_EL2 */
  294 + ldr \tmp, =(HCR_EL2_RW_AARCH32 | HCR_EL2_HCD_DIS)
  295 + msr hcr_el2, \tmp
  296 +
  297 + /* Return to AArch32 Supervisor mode from EL2 */
  298 + ldr \tmp, =(SPSR_EL_END_LE | SPSR_EL_ASYN_MASK |\
  299 + SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\
  300 + SPSR_EL_T_A32 | SPSR_EL_M_AARCH32 |\
  301 + SPSR_EL_M_SVC)
  302 + msr spsr_el2, \tmp
  303 + msr elr_el2, \ep
214 304 eret
215 305 .endm
216 306  
arch/arm/include/asm/system.h
... ... @@ -18,6 +18,95 @@
18 18 #define CR_WXN (1 << 19) /* Write Permission Imply XN */
19 19 #define CR_EE (1 << 25) /* Exception (Big) Endian */
20 20  
  21 +#define ES_TO_AARCH64 1
  22 +#define ES_TO_AARCH32 0
  23 +
  24 +/*
  25 + * SCR_EL3 bits definitions
  26 + */
  27 +#define SCR_EL3_RW_AARCH64 (1 << 10) /* Next lower level is AArch64 */
  28 +#define SCR_EL3_RW_AARCH32 (0 << 10) /* Lower levels are AArch32 */
  29 +#define SCR_EL3_HCE_EN (1 << 8) /* Hypervisor Call enable */
  30 +#define SCR_EL3_SMD_DIS (1 << 7) /* Secure Monitor Call disable */
  31 +#define SCR_EL3_RES1 (3 << 4) /* Reserved, RES1 */
  32 +#define SCR_EL3_NS_EN (1 << 0) /* EL0 and EL1 in Non-secure state */
  33 +
  34 +/*
  35 + * SPSR_EL3/SPSR_EL2 bits definitions
  36 + */
  37 +#define SPSR_EL_END_LE (0 << 9) /* Exception Little-endian */
  38 +#define SPSR_EL_DEBUG_MASK (1 << 9) /* Debug exception masked */
  39 +#define SPSR_EL_ASYN_MASK (1 << 8) /* Asynchronous data abort masked */
  40 +#define SPSR_EL_SERR_MASK (1 << 8) /* System Error exception masked */
  41 +#define SPSR_EL_IRQ_MASK (1 << 7) /* IRQ exception masked */
  42 +#define SPSR_EL_FIQ_MASK (1 << 6) /* FIQ exception masked */
  43 +#define SPSR_EL_T_A32 (0 << 5) /* AArch32 instruction set A32 */
  44 +#define SPSR_EL_M_AARCH64 (0 << 4) /* Exception taken from AArch64 */
  45 +#define SPSR_EL_M_AARCH32 (1 << 4) /* Exception taken from AArch32 */
  46 +#define SPSR_EL_M_SVC (0x3) /* Exception taken from SVC mode */
  47 +#define SPSR_EL_M_HYP (0xa) /* Exception taken from HYP mode */
  48 +#define SPSR_EL_M_EL1H (5) /* Exception taken from EL1h mode */
  49 +#define SPSR_EL_M_EL2H (9) /* Exception taken from EL2h mode */
  50 +
  51 +/*
  52 + * CPTR_EL2 bits definitions
  53 + */
  54 +#define CPTR_EL2_RES1 (3 << 12 | 0x3ff) /* Reserved, RES1 */
  55 +
  56 +/*
  57 + * SCTLR_EL2 bits definitions
  58 + */
  59 +#define SCTLR_EL2_RES1 (3 << 28 | 3 << 22 | 1 << 18 | 1 << 16 |\
  60 + 1 << 11 | 3 << 4) /* Reserved, RES1 */
  61 +#define SCTLR_EL2_EE_LE (0 << 25) /* Exception Little-endian */
  62 +#define SCTLR_EL2_WXN_DIS (0 << 19) /* Write permission is not XN */
  63 +#define SCTLR_EL2_ICACHE_DIS (0 << 12) /* Instruction cache disabled */
  64 +#define SCTLR_EL2_SA_DIS (0 << 3) /* Stack Alignment Check disabled */
  65 +#define SCTLR_EL2_DCACHE_DIS (0 << 2) /* Data cache disabled */
  66 +#define SCTLR_EL2_ALIGN_DIS (0 << 1) /* Alignment check disabled */
  67 +#define SCTLR_EL2_MMU_DIS (0) /* MMU disabled */
  68 +
  69 +/*
  70 + * CNTHCTL_EL2 bits definitions
  71 + */
  72 +#define CNTHCTL_EL2_EL1PCEN_EN (1 << 1) /* Physical timer regs accessible */
  73 +#define CNTHCTL_EL2_EL1PCTEN_EN (1 << 0) /* Physical counter accessible */
  74 +
  75 +/*
  76 + * HCR_EL2 bits definitions
  77 + */
  78 +#define HCR_EL2_RW_AARCH64 (1 << 31) /* EL1 is AArch64 */
  79 +#define HCR_EL2_RW_AARCH32 (0 << 31) /* Lower levels are AArch32 */
  80 +#define HCR_EL2_HCD_DIS (1 << 29) /* Hypervisor Call disabled */
  81 +
  82 +/*
  83 + * CPACR_EL1 bits definitions
  84 + */
  85 +#define CPACR_EL1_FPEN_EN (3 << 20) /* SIMD and FP instruction enabled */
  86 +
  87 +/*
  88 + * SCTLR_EL1 bits definitions
  89 + */
  90 +#define SCTLR_EL1_RES1 (3 << 28 | 3 << 22 | 1 << 20 |\
  91 + 1 << 11) /* Reserved, RES1 */
  92 +#define SCTLR_EL1_UCI_DIS (0 << 26) /* Cache instruction disabled */
  93 +#define SCTLR_EL1_EE_LE (0 << 25) /* Exception Little-endian */
  94 +#define SCTLR_EL1_WXN_DIS (0 << 19) /* Write permission is not XN */
  95 +#define SCTLR_EL1_NTWE_DIS (0 << 18) /* WFE instruction disabled */
  96 +#define SCTLR_EL1_NTWI_DIS (0 << 16) /* WFI instruction disabled */
  97 +#define SCTLR_EL1_UCT_DIS (0 << 15) /* CTR_EL0 access disabled */
  98 +#define SCTLR_EL1_DZE_DIS (0 << 14) /* DC ZVA instruction disabled */
  99 +#define SCTLR_EL1_ICACHE_DIS (0 << 12) /* Instruction cache disabled */
  100 +#define SCTLR_EL1_UMA_DIS (0 << 9) /* User Mask Access disabled */
  101 +#define SCTLR_EL1_SED_EN (0 << 8) /* SETEND instruction enabled */
  102 +#define SCTLR_EL1_ITD_EN (0 << 7) /* IT instruction enabled */
  103 +#define SCTLR_EL1_CP15BEN_DIS (0 << 5) /* CP15 barrier operation disabled */
  104 +#define SCTLR_EL1_SA0_DIS (0 << 4) /* Stack Alignment EL0 disabled */
  105 +#define SCTLR_EL1_SA_DIS (0 << 3) /* Stack Alignment EL1 disabled */
  106 +#define SCTLR_EL1_DCACHE_DIS (0 << 2) /* Data cache disabled */
  107 +#define SCTLR_EL1_ALIGN_DIS (0 << 1) /* Alignment check disabled */
  108 +#define SCTLR_EL1_MMU_DIS (0) /* MMU disabled */
  109 +
21 110 #ifndef __ASSEMBLY__
22 111  
23 112 u64 get_page_table_size(void);
... ... @@ -98,8 +187,34 @@
98 187 int __asm_invalidate_l3_icache(void);
99 188 void __asm_switch_ttbr(u64 new_ttbr);
100 189  
101   -void armv8_switch_to_el2(void);
102   -void armv8_switch_to_el1(void);
  190 +/*
  191 + * Switch from EL3 to EL2 for ARMv8
  192 + *
  193 + * @args: For loading 64-bit OS, fdt address.
  194 + * For loading 32-bit OS, zero.
  195 + * @mach_nr: For loading 64-bit OS, zero.
  196 + * For loading 32-bit OS, machine nr
  197 + * @fdt_addr: For loading 64-bit OS, zero.
  198 + * For loading 32-bit OS, fdt address.
  199 + * @entry_point: kernel entry point
  200 + * @es_flag: execution state flag, ES_TO_AARCH64 or ES_TO_AARCH32
  201 + */
  202 +void armv8_switch_to_el2(u64 args, u64 mach_nr, u64 fdt_addr,
  203 + u64 entry_point, u64 es_flag);
  204 +/*
  205 + * Switch from EL2 to EL1 for ARMv8
  206 + *
  207 + * @args: For loading 64-bit OS, fdt address.
  208 + * For loading 32-bit OS, zero.
  209 + * @mach_nr: For loading 64-bit OS, zero.
  210 + * For loading 32-bit OS, machine nr
  211 + * @fdt_addr: For loading 64-bit OS, zero.
  212 + * For loading 32-bit OS, fdt address.
  213 + * @entry_point: kernel entry point
  214 + * @es_flag: execution state flag, ES_TO_AARCH64 or ES_TO_AARCH32
  215 + */
  216 +void armv8_switch_to_el1(u64 args, u64 mach_nr, u64 fdt_addr,
  217 + u64 entry_point, u64 es_flag);
103 218 void gic_init(void);
104 219 void gic_send_sgi(unsigned long sgino);
105 220 void wait_for_wakeup(void);
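
The symbolic definitions above resolve to the same values the assembly previously hard-coded; a few worked examples (the *_VAL names are only for illustration):

	/* scr_el3 for an AArch64 lower EL, formerly "mov ..., #0x5b1" */
	#define SCR_EL3_AARCH64_VAL	(SCR_EL3_RW_AARCH64 | SCR_EL3_HCE_EN | \
					 SCR_EL3_SMD_DIS | SCR_EL3_RES1 | \
					 SCR_EL3_NS_EN)
	/* = 0x400 | 0x100 | 0x80 | 0x30 | 0x1 = 0x5b1 */

	/* spsr_el3 for a return to EL2h, formerly "mov ..., #0x3c9" */
	#define SPSR_EL2H_VAL		(SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK | \
					 SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK | \
					 SPSR_EL_M_AARCH64 | SPSR_EL_M_EL2H)
	/* = 0x200 | 0x100 | 0x80 | 0x40 | 0x9 = 0x3c9 */

	/* spsr_el2 for a return to EL1h, formerly "mov ..., #0x3c5" */
	#define SPSR_EL1H_VAL		(SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK | \
					 SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK | \
					 SPSR_EL_M_AARCH64 | SPSR_EL_M_EL1H)
	/* = 0x3c5 */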
arch/arm/lib/bootm.c
... ... @@ -200,10 +200,6 @@
200 200 {
201 201 smp_kick_all_cpus();
202 202 dcache_disable(); /* flush cache before switching to EL2 */
203   - armv8_switch_to_el2();
204   -#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
205   - armv8_switch_to_el1();
206   -#endif
207 203 }
208 204 #endif
209 205  
... ... @@ -280,6 +276,24 @@
280 276 }
281 277 #endif
282 278  
  279 +#ifdef CONFIG_ARM64
  280 +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
  281 +static void switch_to_el1(void)
  282 +{
  283 + if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
  284 + (images.os.arch == IH_ARCH_ARM))
  285 + armv8_switch_to_el1(0, (u64)gd->bd->bi_arch_number,
  286 + (u64)images.ft_addr,
  287 + (u64)images.ep,
  288 + ES_TO_AARCH32);
  289 + else
  290 + armv8_switch_to_el1((u64)images.ft_addr, 0, 0,
  291 + images.ep,
  292 + ES_TO_AARCH64);
  293 +}
  294 +#endif
  295 +#endif
  296 +
283 297 /* Subcommand: GO */
284 298 static void boot_jump_linux(bootm_headers_t *images, int flag)
285 299 {
... ... @@ -299,7 +313,22 @@
299 313  
300 314 if (!fake) {
301 315 do_nonsec_virt_switch();
302   - kernel_entry(images->ft_addr, NULL, NULL, NULL);
  316 +
  317 +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
  318 + armv8_switch_to_el2((u64)images->ft_addr, 0, 0,
  319 + (u64)switch_to_el1, ES_TO_AARCH64);
  320 +#else
  321 + if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
  322 + (images->os.arch == IH_ARCH_ARM))
  323 + armv8_switch_to_el2(0, (u64)gd->bd->bi_arch_number,
  324 + (u64)images->ft_addr,
  325 + (u64)images->ep,
  326 + ES_TO_AARCH32);
  327 + else
  328 + armv8_switch_to_el2((u64)images->ft_addr, 0, 0,
  329 + images->ep,
  330 + ES_TO_AARCH64);
  331 +#endif
303 332 }
304 333 #else
305 334 unsigned long machid = gd->bd->bi_arch_number;
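
When CONFIG_ARMV8_SWITCH_TO_EL1 is enabled, the drop to EL1 is chained through the EL2 switch by passing switch_to_el1() as the "entry point" of the first call, roughly:

	/*
	 * EL3 --armv8_switch_to_el2(ep=switch_to_el1, ES_TO_AARCH64)--> EL2
	 * EL2 --armv8_switch_to_el1(ep=images.ep, ES_TO_AARCH32/64)--> kernel
	 */
	armv8_switch_to_el2((u64)images->ft_addr, 0, 0,
			    (u64)switch_to_el1, ES_TO_AARCH64);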
arch/arm/mach-rmobile/lowlevel_init_gen3.S
... ... @@ -61,11 +61,18 @@
61 61 /*
62 62 * All slaves will enter EL2 and optionally EL1.
63 63 */
  64 + adr x3, lowlevel_in_el2
  65 + ldr x4, =ES_TO_AARCH64
64 66 bl armv8_switch_to_el2
  67 +
  68 +lowlevel_in_el2:
65 69 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
  70 + adr x3, lowlevel_in_el1
  71 + ldr x4, =ES_TO_AARCH64
66 72 bl armv8_switch_to_el1
67   -#endif
68 73  
  74 +lowlevel_in_el1:
  75 +#endif
69 76 #endif /* CONFIG_ARMV8_MULTIENTRY */
70 77  
71 78 bl s_init
cmd/bootefi.c
... ... @@ -141,6 +141,18 @@
141 141 return new_fdt;
142 142 }
143 143  
  144 +#ifdef CONFIG_ARM64
  145 +static unsigned long efi_run_in_el2(ulong (*entry)(void *image_handle,
  146 + struct efi_system_table *st), void *image_handle,
  147 + struct efi_system_table *st)
  148 +{
  149 + /* Enable caches again */
  150 + dcache_enable();
  151 +
  152 + return entry(image_handle, st);
  153 +}
  154 +#endif
  155 +
144 156 /*
145 157 * Load an EFI payload into a newly allocated piece of memory, register all
146 158 * EFI objects it would want to access and jump to it.
... ... @@ -231,9 +243,14 @@
231 243 if (current_el() == 3) {
232 244 smp_kick_all_cpus();
233 245 dcache_disable(); /* flush cache before switch to EL2 */
234   - armv8_switch_to_el2();
235   - /* Enable caches again */
236   - dcache_enable();
  246 +
  247 + /* Move into EL2 and keep running there */
  248 + armv8_switch_to_el2((ulong)entry, (ulong)&loaded_image_info,
  249 + (ulong)&systab, (ulong)efi_run_in_el2,
  250 + ES_TO_AARCH64);
  251 +
  252 + /* Should never reach here, efi exits with longjmp */
  253 + while (1) { }
237 254 }
238 255 #endif
239 256  
common/image-fit.c
... ... @@ -27,6 +27,7 @@
27 27 #include <u-boot/md5.h>
28 28 #include <u-boot/sha1.h>
29 29 #include <u-boot/sha256.h>
  30 +#include <generated/autoconf.h>
30 31  
31 32 /*****************************************************************************/
32 33 /* New uImage format routines */
... ... @@ -1161,11 +1162,18 @@
1161 1162 int fit_image_check_arch(const void *fit, int noffset, uint8_t arch)
1162 1163 {
1163 1164 uint8_t image_arch;
  1165 + int aarch32_support = 0;
1164 1166  
  1167 +#ifdef CONFIG_ARM64_SUPPORT_AARCH32
  1168 + aarch32_support = 1;
  1169 +#endif
  1170 +
1165 1171 if (fit_image_get_arch(fit, noffset, &image_arch))
1166 1172 return 0;
1167 1173 return (arch == image_arch) ||
1168   - (arch == IH_ARCH_I386 && image_arch == IH_ARCH_X86_64);
  1174 + (arch == IH_ARCH_I386 && image_arch == IH_ARCH_X86_64) ||
  1175 + (arch == IH_ARCH_ARM64 && image_arch == IH_ARCH_ARM &&
  1176 + aarch32_support);
1169 1177 }
1170 1178  
1171 1179 /**
... ... @@ -1614,6 +1622,9 @@
1614 1622 int type_ok, os_ok;
1615 1623 ulong load, data, len;
1616 1624 uint8_t os;
  1625 +#ifndef USE_HOSTCC
  1626 + uint8_t os_arch;
  1627 +#endif
1617 1628 const char *prop_name;
1618 1629 int ret;
1619 1630  
... ... @@ -1697,6 +1708,12 @@
1697 1708 return -ENOEXEC;
1698 1709 }
1699 1710 #endif
  1711 +
  1712 +#ifndef USE_HOSTCC
  1713 + fit_image_get_arch(fit, noffset, &os_arch);
  1714 + images->os.arch = os_arch;
  1715 +#endif
  1716 +
1700 1717 if (image_type == IH_TYPE_FLATDT &&
1701 1718 !fit_image_check_comp(fit, noffset, IH_COMP_NONE)) {
1702 1719 puts("FDT image is compressed");