Commit 40f8dec54d7803975aed1c88327002c95ea99908

Authored by York Sun
1 parent f43b4356a7

armv8/fsl-lsch3: Release secondary cores from boot hold off with Boot Page

Secondary cores need to be released from holdoff via the boot release
registers. With the GPP bootrom, they can boot from main memory
directly. An individual spin table entry is used for each core. The
spin table and the boot page are reserved in the device tree so the OS
won't overwrite them.

Signed-off-by: York Sun <yorksun@freescale.com>
Signed-off-by: Arnab Basu <arnab.basu@freescale.com>
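
For context, the handshake set up by this patch works as follows: each
secondary core owns one 64-byte spin table element, records its status
and LPID there, and sleeps in wfe until a 64-bit entry address is
written into the element. A minimal C sketch of the releasing side,
matching the element layout defined in mp.h below (the struct and
helper names here are illustrative, not part of the patch):

    #include <stdint.h>

    /* One element per core, padded to a cache line; layout per mp.h. */
    struct spin_table_elem {
            uint64_t entry_addr;    /* core branches here once non-zero */
            uint64_t status;        /* set to 1 by the core while it spins */
            uint64_t lpid;          /* MPIDR-derived id written by the core */
            uint64_t pad[5];        /* pad the element to 64 bytes */
    };

    /*
     * Release one spinning core to 'entry' (AArch64; sketch only, the
     * patch also flushes the element's cache line before waking).
     */
    static void release_core(volatile struct spin_table_elem *elem,
                             uint64_t entry)
    {
            elem->entry_addr = entry;                /* polled in wfe loop */
            __asm__ volatile("dsb st" ::: "memory"); /* publish the store */
            __asm__ volatile("sev");                 /* wake parked cores */
    }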

Showing 12 changed files with 518 additions and 89 deletions

arch/arm/cpu/armv8/fsl-lsch3/Makefile
... ... @@ -7,4 +7,6 @@
7 7 obj-y += cpu.o
8 8 obj-y += lowlevel.o
9 9 obj-y += speed.o
  10 +obj-$(CONFIG_MP) += mp.o
  11 +obj-$(CONFIG_OF_LIBFDT) += fdt.o
arch/arm/cpu/armv8/fsl-lsch3/cpu.c
... ... @@ -11,6 +11,7 @@
11 11 #include <asm/io.h>
12 12 #include <asm/arch-fsl-lsch3/immap_lsch3.h>
13 13 #include "cpu.h"
  14 +#include "mp.h"
14 15 #include "speed.h"
15 16 #include <fsl_mc.h>
16 17  
... ... @@ -433,5 +434,17 @@
433 434 error = mc_init(bis);
434 435 #endif
435 436 return error;
  437 +}
  438 +
  439 +
  440 +int arch_early_init_r(void)
  441 +{
  442 + int rv;
  443 + rv = fsl_lsch3_wake_secondary_cores();
  444 +
  445 + if (rv)
  446 + printf("Did not wake secondary cores\n");
  447 +
  448 + return 0;
436 449 }
arch/arm/cpu/armv8/fsl-lsch3/cpu.h
... ... @@ -5,4 +5,5 @@
5 5 */
6 6  
7 7 int fsl_qoriq_core_to_cluster(unsigned int core);
  8 +u32 cpu_mask(void);
arch/arm/cpu/armv8/fsl-lsch3/fdt.c
  1 +/*
  2 + * Copyright 2014 Freescale Semiconductor, Inc.
  3 + *
  4 + * SPDX-License-Identifier: GPL-2.0+
  5 + */
  6 +
  7 +#include <common.h>
  8 +#include <libfdt.h>
  9 +#include <fdt_support.h>
  10 +#include "mp.h"
  11 +
  12 +#ifdef CONFIG_MP
  13 +void ft_fixup_cpu(void *blob)
  14 +{
  15 + int off;
  16 + __maybe_unused u64 spin_tbl_addr = (u64)get_spin_tbl_addr();
  17 + fdt32_t *reg;
  18 + int addr_cells;
  19 + u64 val;
  20 + size_t *boot_code_size = &(__secondary_boot_code_size);
  21 +
  22 + off = fdt_path_offset(blob, "/cpus");
  23 + if (off < 0) {
  24 + puts("couldn't find /cpus node\n");
  25 + return;
  26 + }
  27 + of_bus_default_count_cells(blob, off, &addr_cells, NULL);
  28 +
  29 + off = fdt_node_offset_by_prop_value(blob, -1, "device_type", "cpu", 4);
  30 + while (off != -FDT_ERR_NOTFOUND) {
  31 + reg = (fdt32_t *)fdt_getprop(blob, off, "reg", 0);
  32 + if (reg) {
  33 + val = spin_tbl_addr;
  34 + val += id_to_core(of_read_number(reg, addr_cells))
  35 + * SPIN_TABLE_ELEM_SIZE;
  36 + val = cpu_to_fdt64(val);
  37 + fdt_setprop_string(blob, off, "enable-method",
  38 + "spin-table");
  39 + fdt_setprop(blob, off, "cpu-release-addr",
  40 + &val, sizeof(val));
  41 + } else {
  42 + puts("Warning: found cpu node without reg property\n");
  43 + }
  44 + off = fdt_node_offset_by_prop_value(blob, off, "device_type",
  45 + "cpu", 4);
  46 + }
  47 +
  48 + fdt_add_mem_rsv(blob, (uintptr_t)&secondary_boot_code,
  49 + *boot_code_size);
  50 +}
  51 +#endif
  52 +
  53 +void ft_cpu_setup(void *blob, bd_t *bd)
  54 +{
  55 +#ifdef CONFIG_MP
  56 + ft_fixup_cpu(blob);
  57 +#endif
  58 +}
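
The net effect of ft_fixup_cpu is that each cpu node in the kernel's
device tree gains the spin-table enable method. A node would end up
looking roughly like this (addresses are hypothetical; the release
address is this core's element inside the memreserved region):

    cpu@101 {                       /* cluster 1, core 1 -> element 5 */
            device_type = "cpu";
            reg = <0x101>;
            enable-method = "spin-table";
            /* 64-bit value: __spin_table + 5 * SPIN_TABLE_ELEM_SIZE */
            cpu-release-addr = <0x0 0xfff00140>;    /* hypothetical */
    };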
arch/arm/cpu/armv8/fsl-lsch3/lowlevel.S
... ... @@ -8,7 +8,9 @@
8 8  
9 9 #include <config.h>
10 10 #include <linux/linkage.h>
  11 +#include <asm/gic.h>
11 12 #include <asm/macro.h>
  13 +#include "mp.h"
12 14  
13 15 ENTRY(lowlevel_init)
14 16 mov x29, lr /* Save LR */
15 17  
16 18  
17 19  
18 20  
19 21  
20 22  
21 23  
22 24  
... ... @@ -35,32 +37,115 @@
35 37 #endif
36 38 #endif
37 39  
38   - branch_if_master x0, x1, 1f
  40 + branch_if_master x0, x1, 2f
39 41  
  42 + ldr x0, =secondary_boot_func
  43 + blr x0
  44 +2:
  45 + mov lr, x29 /* Restore LR */
  46 + ret
  47 +ENDPROC(lowlevel_init)
  48 +
  49 + /* Keep literals not used by the secondary boot code outside it */
  50 + .ltorg
  51 +
  52 + /* Using 64 bit alignment since the spin table is accessed as data */
  53 + .align 4
  54 + .global secondary_boot_code
  55 + /* Secondary Boot Code starts here */
  56 +secondary_boot_code:
  57 + .global __spin_table
  58 +__spin_table:
  59 + .space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
  60 +
  61 + .align 2
  62 +ENTRY(secondary_boot_func)
40 63 /*
41   - * Slave should wait for master clearing spin table.
42   - * This sync prevent salves observing incorrect
43   - * value of spin table and jumping to wrong place.
  64 + * MPIDR_EL1 Fields:
  65 + * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
  66 + * MPIDR[7:2] = AFF0_RES
  67 + * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
  68 + * MPIDR[23:16] = AFF2_CLUSTERID
  69 + * MPIDR[24] = MT
  70 + * MPIDR[29:25] = RES0
  71 + * MPIDR[30] = U
  72 + * MPIDR[31] = ME
  73 + * MPIDR[39:32] = AFF3
  74 + *
  75 + * Linear Processor ID (LPID) calculation from MPIDR_EL1:
  76 + * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
  77 + * until AFF2_CLUSTERID and AFF3 have non-zero values)
  78 + *
  79 + * LPID = MPIDR[15:8] | MPIDR[1:0]
44 80 */
45   -#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
46   -#ifdef CONFIG_GICV2
47   - ldr x0, =GICC_BASE
48   -#endif
49   - bl gic_wait_for_interrupt
50   -#endif
51   -
  81 + mrs x0, mpidr_el1
  82 + ubfm x1, x0, #8, #15
  83 + ubfm x2, x0, #0, #1
  84 + orr x10, x2, x1, lsl #2 /* x10 has LPID */
  85 + ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */
52 86 /*
53   - * All processors will enter EL2 and optionally EL1.
  87 + * offset of the spin table element for this core from start of spin
  88 + * table (each elem is padded to 64 bytes)
54 89 */
55   - bl armv8_switch_to_el2
  90 + lsl x1, x10, #6
  91 + ldr x0, =__spin_table
   92 + /* physical address of this cpu's spin table element */
  93 + add x11, x1, x0
  94 +
  95 + str x9, [x11, #16] /* LPID */
  96 + mov x4, #1
  97 + str x4, [x11, #8] /* STATUS */
  98 + dsb sy
  99 +#if defined(CONFIG_GICV3)
  100 + gic_wait_for_interrupt_m x0
  101 +#elif defined(CONFIG_GICV2)
  102 + ldr x0, =GICC_BASE
  103 + gic_wait_for_interrupt_m x0, w1
  104 +#endif
  105 +
  106 + bl secondary_switch_to_el2
56 107 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
57   - bl armv8_switch_to_el1
  108 + bl secondary_switch_to_el1
58 109 #endif
59   - b 2f
60 110  
61   -1:
62   -2:
63   - mov lr, x29 /* Restore LR */
64   - ret
65   -ENDPROC(lowlevel_init)
  111 +slave_cpu:
  112 + wfe
  113 + ldr x0, [x11]
  114 + cbz x0, slave_cpu
  115 +#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
  116 + mrs x1, sctlr_el2
  117 +#else
  118 + mrs x1, sctlr_el1
  119 +#endif
  120 + tbz x1, #25, cpu_is_le
  121 + rev x0, x0 /* BE to LE conversion */
  122 +cpu_is_le:
  123 + br x0 /* branch to the given address */
  124 +ENDPROC(secondary_boot_func)
  125 +
  126 +ENTRY(secondary_switch_to_el2)
  127 + switch_el x0, 1f, 0f, 0f
  128 +0: ret
  129 +1: armv8_switch_to_el2_m x0
  130 +ENDPROC(secondary_switch_to_el2)
  131 +
  132 +ENTRY(secondary_switch_to_el1)
  133 + switch_el x0, 0f, 1f, 0f
  134 +0: ret
  135 +1: armv8_switch_to_el1_m x0, x1
  136 +ENDPROC(secondary_switch_to_el1)
  137 +
  138 + /* Ensure that the literals used by the secondary boot code are
  139 + * assembled within it (this is required so that we can protect
   140 + * this area with a single memreserve region)
  141 + */
  142 + .ltorg
  143 +
  144 + /* 64 bit alignment for elements accessed as data */
  145 + .align 4
  146 + .globl __secondary_boot_code_size
  147 + .type __secondary_boot_code_size, %object
  148 + /* Secondary Boot Code ends here */
  149 +__secondary_boot_code_size:
  150 + .quad .-secondary_boot_code
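
For reference, the ubfm/orr sequence at the top of secondary_boot_func
implements the LPID calculation described in the comment above; in C it
would read roughly as follows (a sketch assuming only AFF0 and AFF1
carry non-zero values, as the comment states; the function name is
illustrative):

    /* LPID = AFF1 (cluster) * 4 + AFF0 (core within cluster). */
    static inline unsigned int mpidr_to_lpid(uint64_t mpidr)
    {
            unsigned int core    = mpidr & 0x3;         /* MPIDR[1:0]  */
            unsigned int cluster = (mpidr >> 8) & 0xff; /* MPIDR[15:8] */

            return (cluster << 2) | core;
    }

    /* This core's element then sits at __spin_table + lpid * 64. */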
arch/arm/cpu/armv8/fsl-lsch3/mp.c
  1 +/*
  2 + * Copyright 2014 Freescale Semiconductor, Inc.
  3 + *
  4 + * SPDX-License-Identifier: GPL-2.0+
  5 + */
  6 +
  7 +#include <common.h>
   8 +#include <asm/io.h>
   9 +#include <asm/system.h>
   10 +#include <asm/arch-fsl-lsch3/immap_lsch3.h>
   11 +
   12 +#include "mp.h"
  13 +
  14 +DECLARE_GLOBAL_DATA_PTR;
  15 +
  16 +void *get_spin_tbl_addr(void)
  17 +{
  18 + return &__spin_table;
  19 +}
  20 +
  21 +phys_addr_t determine_mp_bootpg(void)
  22 +{
  23 + return (phys_addr_t)&secondary_boot_code;
  24 +}
  25 +
   26 +int fsl_lsch3_wake_secondary_cores(void)
  27 +{
  28 + struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  29 + struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
  30 + u32 cores, cpu_up_mask = 1;
  31 + int i, timeout = 10;
  32 + u64 *table = get_spin_tbl_addr();
  33 +
  34 + cores = cpu_mask();
  35 + /* Clear spin table so that secondary processors
  36 + * observe the correct value after waking up from wfe.
  37 + */
  38 + memset(table, 0, CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE);
  39 + flush_dcache_range((unsigned long)table,
  40 + (unsigned long)table +
  41 + (CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE));
  42 +
  43 + printf("Waking secondary cores to start from %lx\n", gd->relocaddr);
  44 + out_le32(&gur->bootlocptrh, (u32)(gd->relocaddr >> 32));
  45 + out_le32(&gur->bootlocptrl, (u32)gd->relocaddr);
  46 + out_le32(&gur->scratchrw[6], 1);
  47 + asm volatile("dsb st" : : : "memory");
  48 + rst->brrl = cores;
  49 + asm volatile("dsb st" : : : "memory");
  50 +
  51 + /* This is needed as a precautionary measure.
  52 + * If some code before this has accidentally released the secondary
  53 + * cores then the pre-bootloader code will trap them in a "wfe" unless
  54 + * the scratchrw[6] is set. In this case we need a sev here to get these
  55 + * cores moving again.
  56 + */
  57 + asm volatile("sev");
  58 +
  59 + while (timeout--) {
  60 + flush_dcache_range((unsigned long)table, (unsigned long)table +
   61 + CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE);
  62 + for (i = 1; i < CONFIG_MAX_CPUS; i++) {
  63 + if (table[i * WORDS_PER_SPIN_TABLE_ENTRY +
  64 + SPIN_TABLE_ELEM_STATUS_IDX])
  65 + cpu_up_mask |= 1 << i;
  66 + }
  67 + if (hweight32(cpu_up_mask) == hweight32(cores))
  68 + break;
  69 + udelay(10);
  70 + }
   71 + if (timeout < 0) {
  72 + printf("Not all cores (0x%x) are up (0x%x)\n",
  73 + cores, cpu_up_mask);
  74 + return 1;
  75 + }
  76 + printf("All (%d) cores are up.\n", hweight32(cores));
  77 +
  78 + return 0;
  79 +}
  80 +
  81 +int is_core_valid(unsigned int core)
  82 +{
  83 + return !!((1 << core) & cpu_mask());
  84 +}
  85 +
  86 +int cpu_reset(int nr)
  87 +{
  88 + puts("Feature is not implemented.\n");
  89 +
  90 + return 0;
  91 +}
  92 +
  93 +int cpu_disable(int nr)
  94 +{
  95 + puts("Feature is not implemented.\n");
  96 +
  97 + return 0;
  98 +}
  99 +
  100 +int core_to_pos(int nr)
  101 +{
  102 + u32 cores = cpu_mask();
  103 + int i, count = 0;
  104 +
  105 + if (nr == 0) {
  106 + return 0;
  107 + } else if (nr >= hweight32(cores)) {
  108 + puts("Not a valid core number.\n");
  109 + return -1;
  110 + }
  111 +
  112 + for (i = 1; i < 32; i++) {
  113 + if (is_core_valid(i)) {
  114 + count++;
  115 + if (count == nr)
  116 + break;
  117 + }
  118 + }
  119 +
   120 + return i;
  121 +}
  122 +
  123 +int cpu_status(int nr)
  124 +{
  125 + u64 *table;
  126 + int pos;
  127 +
  128 + if (nr == 0) {
  129 + table = (u64 *)get_spin_tbl_addr();
  130 + printf("table base @ 0x%p\n", table);
  131 + } else {
  132 + pos = core_to_pos(nr);
  133 + if (pos < 0)
  134 + return -1;
  135 + table = (u64 *)get_spin_tbl_addr() + pos *
  136 + WORDS_PER_SPIN_TABLE_ENTRY;
  137 + printf("table @ 0x%p\n", table);
  138 + printf(" addr - 0x%016llx\n",
  139 + table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
  140 + printf(" status - 0x%016llx\n",
  141 + table[SPIN_TABLE_ELEM_STATUS_IDX]);
  142 + printf(" lpid - 0x%016llx\n",
  143 + table[SPIN_TABLE_ELEM_LPID_IDX]);
  144 + }
  145 +
  146 + return 0;
  147 +}
  148 +
  149 +int cpu_release(int nr, int argc, char * const argv[])
  150 +{
  151 + u64 boot_addr;
  152 + u64 *table = (u64 *)get_spin_tbl_addr();
  153 + int pos;
  154 +
  155 + pos = core_to_pos(nr);
  156 + if (pos <= 0)
  157 + return -1;
  158 +
  159 + table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
  160 + boot_addr = simple_strtoull(argv[0], NULL, 16);
  161 + table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX] = boot_addr;
  162 + flush_dcache_range((unsigned long)table,
  163 + (unsigned long)table + SPIN_TABLE_ELEM_SIZE);
   164 + asm volatile("dsb st" : : : "memory");
  165 + smp_kick_all_cpus(); /* only those with entry addr set will run */
  166 +
  167 + return 0;
  168 +}
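
These hooks (cpu_status, cpu_release, cpu_reset, cpu_disable) back
U-Boot's generic cpu command from common/cmd_mp.c. Assuming that
command is compiled in for the board, a secondary core could be
inspected and released from the console roughly like this (the entry
address is hypothetical; cpu_release parses argv[0] as hex):

    => cpu 1 status
    => cpu 1 release 80080000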
arch/arm/cpu/armv8/fsl-lsch3/mp.h
  1 +/*
  2 + * Copyright 2014, Freescale Semiconductor
  3 + *
  4 + * SPDX-License-Identifier: GPL-2.0+
  5 + */
  6 +
  7 +#ifndef _FSL_CH3_MP_H
  8 +#define _FSL_CH3_MP_H
  9 +
   10 +/*
   11 + * Each spin table element is defined as
   12 + * struct {
   13 + *	uint64_t entry_addr;
   14 + *	uint64_t status;
   15 + *	uint64_t lpid;
   16 + * };
   17 + * We pad this struct to 64 bytes so each entry is in its own cache line.
   18 + * The actual spin table is an array of these structures.
   19 + */
  20 +#define SPIN_TABLE_ELEM_ENTRY_ADDR_IDX 0
  21 +#define SPIN_TABLE_ELEM_STATUS_IDX 1
  22 +#define SPIN_TABLE_ELEM_LPID_IDX 2
  23 +#define WORDS_PER_SPIN_TABLE_ENTRY 8 /* pad to 64 bytes */
  24 +#define SPIN_TABLE_ELEM_SIZE 64
  25 +
   26 +#define id_to_core(x) (((x) & 3) | ((x) >> 6))
  27 +#ifndef __ASSEMBLY__
  28 +extern u64 __spin_table[];
  29 +extern u64 *secondary_boot_code;
  30 +extern size_t __secondary_boot_code_size;
   31 +int fsl_lsch3_wake_secondary_cores(void);
  32 +void *get_spin_tbl_addr(void);
  33 +phys_addr_t determine_mp_bootpg(void);
  34 +void secondary_boot_func(void);
  35 +#endif
  36 +#endif /* _FSL_CH3_MP_H */
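
Worked example of id_to_core: a cpu node whose reg property encodes
MPIDR[15:0] = 0x101 (cluster 1, core 1) gives ((0x101) & 3) |
((0x101) >> 6) = 1 | 4 = 5, matching the LPID that secondary_boot_func
computes as (cluster << 2) | core. That core's spin table element
therefore sits at byte offset 5 * SPIN_TABLE_ELEM_SIZE = 320 from
__spin_table.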
arch/arm/cpu/armv8/transition.S
... ... @@ -14,71 +14,12 @@
14 14 ENTRY(armv8_switch_to_el2)
15 15 switch_el x0, 1f, 0f, 0f
16 16 0: ret
17   -1:
18   - mov x0, #0x5b1 /* Non-secure EL0/EL1 | HVC | 64bit EL2 */
19   - msr scr_el3, x0
20   - msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */
21   - mov x0, #0x33ff
22   - msr cptr_el2, x0 /* Disable coprocessor traps to EL2 */
23   -
24   - /* Initialize SCTLR_EL2 */
25   - msr sctlr_el2, xzr
26   -
27   - /* Return to the EL2_SP2 mode from EL3 */
28   - mov x0, sp
29   - msr sp_el2, x0 /* Migrate SP */
30   - mrs x0, vbar_el3
31   - msr vbar_el2, x0 /* Migrate VBAR */
32   - mov x0, #0x3c9
33   - msr spsr_el3, x0 /* EL2_SP2 | D | A | I | F */
34   - msr elr_el3, lr
35   - eret
  17 +1: armv8_switch_to_el2_m x0
36 18 ENDPROC(armv8_switch_to_el2)
37 19  
38 20 ENTRY(armv8_switch_to_el1)
39 21 switch_el x0, 0f, 1f, 0f
40 22 0: ret
41   -1:
42   - /* Initialize Generic Timers */
43   - mrs x0, cnthctl_el2
44   - orr x0, x0, #0x3 /* Enable EL1 access to timers */
45   - msr cnthctl_el2, x0
46   - msr cntvoff_el2, xzr
47   - mrs x0, cntkctl_el1
48   - orr x0, x0, #0x3 /* Enable EL0 access to timers */
49   - msr cntkctl_el1, x0
50   -
51   - /* Initilize MPID/MPIDR registers */
52   - mrs x0, midr_el1
53   - mrs x1, mpidr_el1
54   - msr vpidr_el2, x0
55   - msr vmpidr_el2, x1
56   -
57   - /* Disable coprocessor traps */
58   - mov x0, #0x33ff
59   - msr cptr_el2, x0 /* Disable coprocessor traps to EL2 */
60   - msr hstr_el2, xzr /* Disable coprocessor traps to EL2 */
61   - mov x0, #3 << 20
62   - msr cpacr_el1, x0 /* Enable FP/SIMD at EL1 */
63   -
64   - /* Initialize HCR_EL2 */
65   - mov x0, #(1 << 31) /* 64bit EL1 */
66   - orr x0, x0, #(1 << 29) /* Disable HVC */
67   - msr hcr_el2, x0
68   -
69   - /* SCTLR_EL1 initialization */
70   - mov x0, #0x0800
71   - movk x0, #0x30d0, lsl #16
72   - msr sctlr_el1, x0
73   -
74   - /* Return to the EL1_SP1 mode from EL2 */
75   - mov x0, sp
76   - msr sp_el1, x0 /* Migrate SP */
77   - mrs x0, vbar_el2
78   - msr vbar_el1, x0 /* Migrate VBAR */
79   - mov x0, #0x3c5
80   - msr spsr_el2, x0 /* EL1_SP1 | D | A | I | F */
81   - msr elr_el2, lr
82   - eret
  23 +1: armv8_switch_to_el1_m x0, x1
83 24 ENDPROC(armv8_switch_to_el1)
arch/arm/include/asm/arch-fsl-lsch3/config.h
... ... @@ -8,7 +8,7 @@
8 8 #define _ASM_ARMV8_FSL_LSCH3_CONFIG_
9 9  
10 10 #include <fsl_ddrc_version.h>
11   -
  11 +#define CONFIG_MP
12 12 #define CONFIG_SYS_FSL_OCRAM_BASE 0x18000000 /* initial RAM */
13 13 /* Link Definitions */
14 14 #define CONFIG_SYS_INIT_SP_ADDR (CONFIG_SYS_FSL_OCRAM_BASE + 0xfff0)
... ... @@ -19,6 +19,7 @@
19 19 #define CONFIG_SYS_FSL_DDR3_ADDR 0x08210000
20 20 #define CONFIG_SYS_FSL_GUTS_ADDR (CONFIG_SYS_IMMR + 0x00E00000)
21 21 #define CONFIG_SYS_FSL_PMU_ADDR (CONFIG_SYS_IMMR + 0x00E30000)
  22 +#define CONFIG_SYS_FSL_RST_ADDR (CONFIG_SYS_IMMR + 0x00E60000)
22 23 #define CONFIG_SYS_FSL_CH3_CLK_GRPA_ADDR (CONFIG_SYS_IMMR + 0x00300000)
23 24 #define CONFIG_SYS_FSL_CH3_CLK_GRPB_ADDR (CONFIG_SYS_IMMR + 0x00310000)
24 25 #define CONFIG_SYS_FSL_CH3_CLK_CTRL_ADDR (CONFIG_SYS_IMMR + 0x00370000)
arch/arm/include/asm/arch-fsl-lsch3/immap_lsch3.h
... ... @@ -113,5 +113,40 @@
113 113 u8 res_04[0x20-0x04];
114 114 } clkcncsr[8];
115 115 };
  116 +
  117 +struct ccsr_reset {
  118 + u32 rstcr; /* 0x000 */
  119 + u32 rstcrsp; /* 0x004 */
  120 + u8 res_008[0x10-0x08]; /* 0x008 */
  121 + u32 rstrqmr1; /* 0x010 */
  122 + u32 rstrqmr2; /* 0x014 */
  123 + u32 rstrqsr1; /* 0x018 */
  124 + u32 rstrqsr2; /* 0x01c */
  125 + u32 rstrqwdtmrl; /* 0x020 */
  126 + u32 rstrqwdtmru; /* 0x024 */
  127 + u8 res_028[0x30-0x28]; /* 0x028 */
  128 + u32 rstrqwdtsrl; /* 0x030 */
  129 + u32 rstrqwdtsru; /* 0x034 */
  130 + u8 res_038[0x60-0x38]; /* 0x038 */
  131 + u32 brrl; /* 0x060 */
  132 + u32 brru; /* 0x064 */
  133 + u8 res_068[0x80-0x68]; /* 0x068 */
  134 + u32 pirset; /* 0x080 */
  135 + u32 pirclr; /* 0x084 */
  136 + u8 res_088[0x90-0x88]; /* 0x088 */
  137 + u32 brcorenbr; /* 0x090 */
  138 + u8 res_094[0x100-0x94]; /* 0x094 */
  139 + u32 rcw_reqr; /* 0x100 */
  140 + u32 rcw_completion; /* 0x104 */
  141 + u8 res_108[0x110-0x108]; /* 0x108 */
  142 + u32 pbi_reqr; /* 0x110 */
  143 + u32 pbi_completion; /* 0x114 */
  144 + u8 res_118[0xa00-0x118]; /* 0x118 */
  145 + u32 qmbm_warmrst; /* 0xa00 */
  146 + u32 soc_warmrst; /* 0xa04 */
  147 + u8 res_a08[0xbf8-0xa08]; /* 0xa08 */
  148 + u32 ip_rev1; /* 0xbf8 */
  149 + u32 ip_rev2; /* 0xbfc */
  150 +};
116 151 #endif /* __ARCH_FSL_LSCH3_IMMAP_H */
arch/arm/include/asm/macro.h
... ... @@ -105,6 +105,99 @@
105 105 cbz \xreg1, \master_label
106 106 .endm
107 107  
  108 +.macro armv8_switch_to_el2_m, xreg1
  109 + /* 64bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1 */
  110 + mov \xreg1, #0x5b1
  111 + msr scr_el3, \xreg1
  112 + msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */
  113 + mov \xreg1, #0x33ff
  114 + msr cptr_el2, \xreg1 /* Disable coprocessor traps to EL2 */
  115 +
  116 + /* Initialize SCTLR_EL2
  117 + *
  118 + * setting RES1 bits (29,28,23,22,18,16,11,5,4) to 1
  119 + * and RES0 bits (31,30,27,26,24,21,20,17,15-13,10-6) +
  120 + * EE,WXN,I,SA,C,A,M to 0
  121 + */
  122 + mov \xreg1, #0x0830
  123 + movk \xreg1, #0x30C5, lsl #16
  124 + msr sctlr_el2, \xreg1
  125 +
  126 + /* Return to the EL2_SP2 mode from EL3 */
  127 + mov \xreg1, sp
  128 + msr sp_el2, \xreg1 /* Migrate SP */
  129 + mrs \xreg1, vbar_el3
  130 + msr vbar_el2, \xreg1 /* Migrate VBAR */
  131 + mov \xreg1, #0x3c9
  132 + msr spsr_el3, \xreg1 /* EL2_SP2 | D | A | I | F */
  133 + msr elr_el3, lr
  134 + eret
  135 +.endm
  136 +
  137 +.macro armv8_switch_to_el1_m, xreg1, xreg2
  138 + /* Initialize Generic Timers */
  139 + mrs \xreg1, cnthctl_el2
  140 + orr \xreg1, \xreg1, #0x3 /* Enable EL1 access to timers */
  141 + msr cnthctl_el2, \xreg1
  142 + msr cntvoff_el2, xzr
  143 +
   144 + /* Initialize MPID/MPIDR registers */
  145 + mrs \xreg1, midr_el1
  146 + mrs \xreg2, mpidr_el1
  147 + msr vpidr_el2, \xreg1
  148 + msr vmpidr_el2, \xreg2
  149 +
  150 + /* Disable coprocessor traps */
  151 + mov \xreg1, #0x33ff
  152 + msr cptr_el2, \xreg1 /* Disable coprocessor traps to EL2 */
  153 + msr hstr_el2, xzr /* Disable coprocessor traps to EL2 */
  154 + mov \xreg1, #3 << 20
  155 + msr cpacr_el1, \xreg1 /* Enable FP/SIMD at EL1 */
  156 +
  157 + /* Initialize HCR_EL2 */
  158 + mov \xreg1, #(1 << 31) /* 64bit EL1 */
  159 + orr \xreg1, \xreg1, #(1 << 29) /* Disable HVC */
  160 + msr hcr_el2, \xreg1
  161 +
  162 + /* SCTLR_EL1 initialization
  163 + *
  164 + * setting RES1 bits (29,28,23,22,20,11) to 1
  165 + * and RES0 bits (31,30,27,21,17,13,10,6) +
   166 + * UCI,EE,E0E,WXN,nTWE,nTWI,UCT,DZE,I,UMA,SED,ITD,
  167 + * CP15BEN,SA0,SA,C,A,M to 0
  168 + */
  169 + mov \xreg1, #0x0800
  170 + movk \xreg1, #0x30d0, lsl #16
  171 + msr sctlr_el1, \xreg1
  172 +
  173 + /* Return to the EL1_SP1 mode from EL2 */
  174 + mov \xreg1, sp
  175 + msr sp_el1, \xreg1 /* Migrate SP */
  176 + mrs \xreg1, vbar_el2
  177 + msr vbar_el1, \xreg1 /* Migrate VBAR */
  178 + mov \xreg1, #0x3c5
  179 + msr spsr_el2, \xreg1 /* EL1_SP1 | D | A | I | F */
  180 + msr elr_el2, lr
  181 + eret
  182 +.endm
  183 +
  184 +#if defined(CONFIG_GICV3)
  185 +.macro gic_wait_for_interrupt_m xreg1
   186 +0: wfi
  187 + mrs \xreg1, ICC_IAR1_EL1
  188 + msr ICC_EOIR1_EL1, \xreg1
  189 + cbnz \xreg1, 0b
  190 +.endm
  191 +#elif defined(CONFIG_GICV2)
  192 +.macro gic_wait_for_interrupt_m xreg1, wreg2
   193 +0: wfi
  194 + ldr \wreg2, [\xreg1, GICC_AIAR]
  195 + str \wreg2, [\xreg1, GICC_AEOIR]
   196 + and \wreg2, \wreg2, #0x3ff
  197 + cbnz \wreg2, 0b
  198 +.endm
  199 +#endif
  200 +
108 201 #endif /* CONFIG_ARM64 */
109 202  
110 203 #endif /* __ASSEMBLY__ */
arch/arm/lib/gic_64.S
... ... @@ -10,8 +10,8 @@
10 10 #include <asm-offsets.h>
11 11 #include <config.h>
12 12 #include <linux/linkage.h>
13   -#include <asm/macro.h>
14 13 #include <asm/gic.h>
  14 +#include <asm/macro.h>
15 15  
16 16  
17 17 /*************************************************************************
18 18  
19 19  
20 20  
... ... @@ -181,15 +181,11 @@
181 181 *
182 182 *************************************************************************/
183 183 ENTRY(gic_wait_for_interrupt)
184   -0: wfi
185 184 #if defined(CONFIG_GICV3)
186   - mrs x9, ICC_IAR1_EL1
187   - msr ICC_EOIR1_EL1, x9
  185 + gic_wait_for_interrupt_m x9
188 186 #elif defined(CONFIG_GICV2)
189   - ldr w9, [x0, GICC_AIAR]
190   - str w9, [x0, GICC_AEOIR]
  187 + gic_wait_for_interrupt_m x0, w9
191 188 #endif
192   - cbnz w9, 0b
193 189 ret
194 190 ENDPROC(gic_wait_for_interrupt)