Commit d80b34c9163aaeb546dd083f896d948edd585932

Authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Here is a handful of minor arm64 fixes discovered and fixed over the
  Christmas break.  The main part is adding some missing #includes that
  we seem to be getting transitively but have started causing problems
  in -next.

   - Fix early mapping fixmap corruption by EFI runtime services
   - Fix __NR_compat_syscalls off-by-one
   - Add missing sanity checks for some 32-bit registers
   - Add some missing #includes that we currently get only transitively
   - Remove unused prepare_to_copy() macro"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/efi: add missing call to early_ioremap_reset()
  arm64: fix missing asm/io.h include in kernel/smp_spin_table.c
  arm64: fix missing asm/alternative.h include in kernel/module.c
  arm64: fix missing linux/bug.h include in asm/arch_timer.h
  arm64: fix missing asm/pgtable-hwdef.h include in asm/processor.h
  arm64: sanity checks: add missing AArch32 registers
  arm64: Remove unused prepare_to_copy()
  arm64: Correct __NR_compat_syscalls for bpf

Showing 9 changed files

arch/arm64/include/asm/arch_timer.h
@@ -21,6 +21,7 @@
 
 #include <asm/barrier.h>
 
+#include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/types.h>
 
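Why the new include is needed: this header calls BUG() directly (in arch_counter_get_cntpct()), so it must pull in <linux/bug.h> itself rather than rely on one of its includers providing the definition transitively. For context on how the register accessors in this header are consumed, below is a hedged sketch modelled on the arm_arch_timer clockevent driver's set-next-event path; the ARCH_TIMER_CTRL_* flag names are assumed to come from <clocksource/arm_arch_timer.h> and are not part of this diff.

    /*
     * Hedged sketch (not part of this commit): programming the virtual
     * timer through the accessors above. TVAL counts down from 'evt';
     * the CTRL write enables the timer and unmasks its interrupt.
     */
    static inline void virt_timer_set_next_event(unsigned long evt)
    {
    	u32 ctrl = arch_timer_reg_read_cp15(ARCH_TIMER_VIRT_ACCESS,
    					    ARCH_TIMER_REG_CTRL);
    
    	ctrl |= ARCH_TIMER_CTRL_ENABLE;		/* assumed flag name */
    	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;	/* assumed flag name */
    
    	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
    				  ARCH_TIMER_REG_TVAL, evt);
    	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
    				  ARCH_TIMER_REG_CTRL, ctrl);
    }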
arch/arm64/include/asm/cpu.h
@@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
 	u64 reg_id_aa64pfr0;
 	u64 reg_id_aa64pfr1;
 
+	u32 reg_id_dfr0;
 	u32 reg_id_isar0;
 	u32 reg_id_isar1;
 	u32 reg_id_isar2;
@@ -51,6 +52,10 @@ struct cpuinfo_arm64 {
 	u32 reg_id_mmfr3;
 	u32 reg_id_pfr0;
 	u32 reg_id_pfr1;
+
+	u32 reg_mvfr0;
+	u32 reg_mvfr1;
+	u32 reg_mvfr2;
 };
 
 DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
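The new fields record the AArch32 debug ID register (ID_DFR0) and the media/VFP ID registers (MVFR0-MVFR2) alongside the other 32-bit IDs, so they can be cross-checked against the boot CPU in cpuinfo.c below. A hedged sketch, not part of this diff, of fetching one of the new fields for the current CPU through the per-cpu variable declared above:

    /* Hedged sketch: read the current CPU's recorded MVFR0. */
    static u32 current_cpu_mvfr0(void)
    {
    	u32 val;
    
    	preempt_disable();	/* stay on one CPU while dereferencing */
    	val = this_cpu_ptr(&cpu_data)->reg_mvfr0;
    	preempt_enable();
    
    	return val;
    }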
arch/arm64/include/asm/processor.h
@@ -31,6 +31,7 @@
 
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
 
@@ -122,9 +123,6 @@ struct task_struct;
 
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk)	do { } while (0)
 
 unsigned long get_wchan(struct task_struct *p);
 
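The new <asm/pgtable-hwdef.h> include exists because this header consumes PHYS_MASK (via ARCH_LOW_ADDRESS_LIMIT), and pgtable-hwdef.h is where PHYS_MASK is defined; until now the definition arrived only transitively. A paraphrased sketch of the dependency, with the exact definitions assumed and shown for illustration only:

    /* Provided by <asm/pgtable-hwdef.h>, roughly (assumed): */
    #define PHYS_MASK_SHIFT		(48)
    #define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)
    
    /* ...and consumed by this header: */
    #define ARCH_LOW_ADDRESS_LIMIT	PHYS_MASK

Without the explicit include, the header only compiled when an earlier include happened to drag the definition in, which is exactly the kind of transitive dependency that broke in -next.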
arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		386
+#define __NR_compat_syscalls		387
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
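The one-line fix above corrects an off-by-one: __NR_compat_syscalls sizes the compat syscall table, so it must be one greater than the highest compat syscall number. Per the shortlog, the culprit was the newly wired-up bpf syscall. A hedged sketch; the __NR_compat_bpf value and the table below are illustrative, not shown in this diff:

    /* Syscall numbers are zero-based, so a table that must hold
     * syscalls 0..386 needs 387 entries. */
    #define __NR_compat_bpf		386	/* assumed, per the shortlog */
    
    static const void *compat_sys_call_table[__NR_compat_syscalls];
    
    static inline int compat_nr_is_valid(unsigned int nr)
    {
    	/* with the old count of 386, nr == 386 (bpf) failed this check */
    	return nr < __NR_compat_syscalls;
    }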
arch/arm64/kernel/cpuinfo.c
@@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
 	 * If we have AArch32, we care about 32-bit features for compat. These
 	 * registers should be RES0 otherwise.
 	 */
+	diff |= CHECK(id_dfr0, boot, cur, cpu);
 	diff |= CHECK(id_isar0, boot, cur, cpu);
 	diff |= CHECK(id_isar1, boot, cur, cpu);
 	diff |= CHECK(id_isar2, boot, cur, cpu);
@@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
 	diff |= CHECK(id_pfr0, boot, cur, cpu);
 	diff |= CHECK(id_pfr1, boot, cur, cpu);
 
+	diff |= CHECK(mvfr0, boot, cur, cpu);
+	diff |= CHECK(mvfr1, boot, cur, cpu);
+	diff |= CHECK(mvfr2, boot, cur, cpu);
+
 	/*
 	 * Mismatched CPU features are a recipe for disaster. Don't even
 	 * pretend to support them.
@@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
 	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
 
+	info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
 	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
 	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
 	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
@@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
 	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+
+	info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+	info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+	info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
 
 	cpuinfo_detect_icache_policy(info);
 
 	check_local_cpu_errata();
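To make the new sanity checks concrete, here is what one of them expands to; the expansion follows directly from the CHECK()/CHECK_MASK() definitions in this file:

    /*
     * diff |= CHECK(mvfr0, boot, cur, cpu);
     *
     * expands via CHECK_MASK(mvfr0, ~0ULL, boot, cur, cpu) to:
     *
     * diff |= check_reg_mask("mvfr0", ~0ULL,
     *			boot->reg_mvfr0, cur->reg_mvfr0, cpu);
     *
     * i.e. every bit of MVFR0 must match the boot CPU's value; any
     * mismatch logs the "SANITY CHECK" warning and taints the kernel
     * with TAINT_CPU_OUT_OF_SPEC.
     */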
arch/arm64/kernel/efi.c
1 /* 1 /*
2 * Extensible Firmware Interface 2 * Extensible Firmware Interface
3 * 3 *
4 * Based on Extensible Firmware Interface Specification version 2.4 4 * Based on Extensible Firmware Interface Specification version 2.4
5 * 5 *
6 * Copyright (C) 2013, 2014 Linaro Ltd. 6 * Copyright (C) 2013, 2014 Linaro Ltd.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 * 11 *
12 */ 12 */
13 13
14 #include <linux/dmi.h> 14 #include <linux/dmi.h>
15 #include <linux/efi.h> 15 #include <linux/efi.h>
16 #include <linux/export.h> 16 #include <linux/export.h>
17 #include <linux/memblock.h> 17 #include <linux/memblock.h>
18 #include <linux/bootmem.h> 18 #include <linux/bootmem.h>
19 #include <linux/of.h> 19 #include <linux/of.h>
20 #include <linux/of_fdt.h> 20 #include <linux/of_fdt.h>
21 #include <linux/sched.h> 21 #include <linux/sched.h>
22 #include <linux/slab.h> 22 #include <linux/slab.h>
23 23
24 #include <asm/cacheflush.h> 24 #include <asm/cacheflush.h>
25 #include <asm/efi.h> 25 #include <asm/efi.h>
26 #include <asm/tlbflush.h> 26 #include <asm/tlbflush.h>
27 #include <asm/mmu_context.h> 27 #include <asm/mmu_context.h>
28 28
29 struct efi_memory_map memmap; 29 struct efi_memory_map memmap;
30 30
31 static efi_runtime_services_t *runtime; 31 static efi_runtime_services_t *runtime;
32 32
33 static u64 efi_system_table; 33 static u64 efi_system_table;
34 34
35 static int uefi_debug __initdata; 35 static int uefi_debug __initdata;
36 static int __init uefi_debug_setup(char *str) 36 static int __init uefi_debug_setup(char *str)
37 { 37 {
38 uefi_debug = 1; 38 uefi_debug = 1;
39 39
40 return 0; 40 return 0;
41 } 41 }
42 early_param("uefi_debug", uefi_debug_setup); 42 early_param("uefi_debug", uefi_debug_setup);
43 43
44 static int __init is_normal_ram(efi_memory_desc_t *md) 44 static int __init is_normal_ram(efi_memory_desc_t *md)
45 { 45 {
46 if (md->attribute & EFI_MEMORY_WB) 46 if (md->attribute & EFI_MEMORY_WB)
47 return 1; 47 return 1;
48 return 0; 48 return 0;
49 } 49 }
50 50
51 static void __init efi_setup_idmap(void) 51 static void __init efi_setup_idmap(void)
52 { 52 {
53 struct memblock_region *r; 53 struct memblock_region *r;
54 efi_memory_desc_t *md; 54 efi_memory_desc_t *md;
55 u64 paddr, npages, size; 55 u64 paddr, npages, size;
56 56
57 for_each_memblock(memory, r) 57 for_each_memblock(memory, r)
58 create_id_mapping(r->base, r->size, 0); 58 create_id_mapping(r->base, r->size, 0);
59 59
60 /* map runtime io spaces */ 60 /* map runtime io spaces */
61 for_each_efi_memory_desc(&memmap, md) { 61 for_each_efi_memory_desc(&memmap, md) {
62 if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md)) 62 if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
63 continue; 63 continue;
64 paddr = md->phys_addr; 64 paddr = md->phys_addr;
65 npages = md->num_pages; 65 npages = md->num_pages;
66 memrange_efi_to_native(&paddr, &npages); 66 memrange_efi_to_native(&paddr, &npages);
67 size = npages << PAGE_SHIFT; 67 size = npages << PAGE_SHIFT;
68 create_id_mapping(paddr, size, 1); 68 create_id_mapping(paddr, size, 1);
69 } 69 }
70 } 70 }
71 71
72 static int __init uefi_init(void) 72 static int __init uefi_init(void)
73 { 73 {
74 efi_char16_t *c16; 74 efi_char16_t *c16;
75 char vendor[100] = "unknown"; 75 char vendor[100] = "unknown";
76 int i, retval; 76 int i, retval;
77 77
78 efi.systab = early_memremap(efi_system_table, 78 efi.systab = early_memremap(efi_system_table,
79 sizeof(efi_system_table_t)); 79 sizeof(efi_system_table_t));
80 if (efi.systab == NULL) { 80 if (efi.systab == NULL) {
81 pr_warn("Unable to map EFI system table.\n"); 81 pr_warn("Unable to map EFI system table.\n");
82 return -ENOMEM; 82 return -ENOMEM;
83 } 83 }
84 84
85 set_bit(EFI_BOOT, &efi.flags); 85 set_bit(EFI_BOOT, &efi.flags);
86 set_bit(EFI_64BIT, &efi.flags); 86 set_bit(EFI_64BIT, &efi.flags);
87 87
88 /* 88 /*
89 * Verify the EFI Table 89 * Verify the EFI Table
90 */ 90 */
91 if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { 91 if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
92 pr_err("System table signature incorrect\n"); 92 pr_err("System table signature incorrect\n");
93 retval = -EINVAL; 93 retval = -EINVAL;
94 goto out; 94 goto out;
95 } 95 }
96 if ((efi.systab->hdr.revision >> 16) < 2) 96 if ((efi.systab->hdr.revision >> 16) < 2)
97 pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", 97 pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
98 efi.systab->hdr.revision >> 16, 98 efi.systab->hdr.revision >> 16,
99 efi.systab->hdr.revision & 0xffff); 99 efi.systab->hdr.revision & 0xffff);
100 100
101 /* Show what we know for posterity */ 101 /* Show what we know for posterity */
102 c16 = early_memremap(efi.systab->fw_vendor, 102 c16 = early_memremap(efi.systab->fw_vendor,
103 sizeof(vendor)); 103 sizeof(vendor));
104 if (c16) { 104 if (c16) {
105 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) 105 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
106 vendor[i] = c16[i]; 106 vendor[i] = c16[i];
107 vendor[i] = '\0'; 107 vendor[i] = '\0';
108 early_memunmap(c16, sizeof(vendor)); 108 early_memunmap(c16, sizeof(vendor));
109 } 109 }
110 110
111 pr_info("EFI v%u.%.02u by %s\n", 111 pr_info("EFI v%u.%.02u by %s\n",
112 efi.systab->hdr.revision >> 16, 112 efi.systab->hdr.revision >> 16,
113 efi.systab->hdr.revision & 0xffff, vendor); 113 efi.systab->hdr.revision & 0xffff, vendor);
114 114
115 retval = efi_config_init(NULL); 115 retval = efi_config_init(NULL);
116 116
117 out: 117 out:
118 early_memunmap(efi.systab, sizeof(efi_system_table_t)); 118 early_memunmap(efi.systab, sizeof(efi_system_table_t));
119 return retval; 119 return retval;
120 } 120 }
121 121
122 /* 122 /*
123 * Return true for RAM regions we want to permanently reserve. 123 * Return true for RAM regions we want to permanently reserve.
124 */ 124 */
125 static __init int is_reserve_region(efi_memory_desc_t *md) 125 static __init int is_reserve_region(efi_memory_desc_t *md)
126 { 126 {
127 switch (md->type) { 127 switch (md->type) {
128 case EFI_LOADER_CODE: 128 case EFI_LOADER_CODE:
129 case EFI_LOADER_DATA: 129 case EFI_LOADER_DATA:
130 case EFI_BOOT_SERVICES_CODE: 130 case EFI_BOOT_SERVICES_CODE:
131 case EFI_BOOT_SERVICES_DATA: 131 case EFI_BOOT_SERVICES_DATA:
132 case EFI_CONVENTIONAL_MEMORY: 132 case EFI_CONVENTIONAL_MEMORY:
133 return 0; 133 return 0;
134 default: 134 default:
135 break; 135 break;
136 } 136 }
137 return is_normal_ram(md); 137 return is_normal_ram(md);
138 } 138 }
139 139
140 static __init void reserve_regions(void) 140 static __init void reserve_regions(void)
141 { 141 {
142 efi_memory_desc_t *md; 142 efi_memory_desc_t *md;
143 u64 paddr, npages, size; 143 u64 paddr, npages, size;
144 144
145 if (uefi_debug) 145 if (uefi_debug)
146 pr_info("Processing EFI memory map:\n"); 146 pr_info("Processing EFI memory map:\n");
147 147
148 for_each_efi_memory_desc(&memmap, md) { 148 for_each_efi_memory_desc(&memmap, md) {
149 paddr = md->phys_addr; 149 paddr = md->phys_addr;
150 npages = md->num_pages; 150 npages = md->num_pages;
151 151
152 if (uefi_debug) { 152 if (uefi_debug) {
153 char buf[64]; 153 char buf[64];
154 154
155 pr_info(" 0x%012llx-0x%012llx %s", 155 pr_info(" 0x%012llx-0x%012llx %s",
156 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, 156 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
157 efi_md_typeattr_format(buf, sizeof(buf), md)); 157 efi_md_typeattr_format(buf, sizeof(buf), md));
158 } 158 }
159 159
160 memrange_efi_to_native(&paddr, &npages); 160 memrange_efi_to_native(&paddr, &npages);
161 size = npages << PAGE_SHIFT; 161 size = npages << PAGE_SHIFT;
162 162
163 if (is_normal_ram(md)) 163 if (is_normal_ram(md))
164 early_init_dt_add_memory_arch(paddr, size); 164 early_init_dt_add_memory_arch(paddr, size);
165 165
166 if (is_reserve_region(md) || 166 if (is_reserve_region(md) ||
167 md->type == EFI_BOOT_SERVICES_CODE || 167 md->type == EFI_BOOT_SERVICES_CODE ||
168 md->type == EFI_BOOT_SERVICES_DATA) { 168 md->type == EFI_BOOT_SERVICES_DATA) {
169 memblock_reserve(paddr, size); 169 memblock_reserve(paddr, size);
170 if (uefi_debug) 170 if (uefi_debug)
171 pr_cont("*"); 171 pr_cont("*");
172 } 172 }
173 173
174 if (uefi_debug) 174 if (uefi_debug)
175 pr_cont("\n"); 175 pr_cont("\n");
176 } 176 }
177 177
178 set_bit(EFI_MEMMAP, &efi.flags); 178 set_bit(EFI_MEMMAP, &efi.flags);
179 } 179 }
180 180
181 181
182 static u64 __init free_one_region(u64 start, u64 end) 182 static u64 __init free_one_region(u64 start, u64 end)
183 { 183 {
184 u64 size = end - start; 184 u64 size = end - start;
185 185
186 if (uefi_debug) 186 if (uefi_debug)
187 pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1); 187 pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);
188 188
189 free_bootmem_late(start, size); 189 free_bootmem_late(start, size);
190 return size; 190 return size;
191 } 191 }
192 192
193 static u64 __init free_region(u64 start, u64 end) 193 static u64 __init free_region(u64 start, u64 end)
194 { 194 {
195 u64 map_start, map_end, total = 0; 195 u64 map_start, map_end, total = 0;
196 196
197 if (end <= start) 197 if (end <= start)
198 return total; 198 return total;
199 199
200 map_start = (u64)memmap.phys_map; 200 map_start = (u64)memmap.phys_map;
201 map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map)); 201 map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
202 map_start &= PAGE_MASK; 202 map_start &= PAGE_MASK;
203 203
204 if (start < map_end && end > map_start) { 204 if (start < map_end && end > map_start) {
205 /* region overlaps UEFI memmap */ 205 /* region overlaps UEFI memmap */
206 if (start < map_start) 206 if (start < map_start)
207 total += free_one_region(start, map_start); 207 total += free_one_region(start, map_start);
208 208
209 if (map_end < end) 209 if (map_end < end)
210 total += free_one_region(map_end, end); 210 total += free_one_region(map_end, end);
211 } else 211 } else
212 total += free_one_region(start, end); 212 total += free_one_region(start, end);
213 213
214 return total; 214 return total;
215 } 215 }
216 216
217 static void __init free_boot_services(void) 217 static void __init free_boot_services(void)
218 { 218 {
219 u64 total_freed = 0; 219 u64 total_freed = 0;
220 u64 keep_end, free_start, free_end; 220 u64 keep_end, free_start, free_end;
221 efi_memory_desc_t *md; 221 efi_memory_desc_t *md;
222 222
223 /* 223 /*
224 * If kernel uses larger pages than UEFI, we have to be careful 224 * If kernel uses larger pages than UEFI, we have to be careful
225 * not to inadvertantly free memory we want to keep if there is 225 * not to inadvertantly free memory we want to keep if there is
226 * overlap at the kernel page size alignment. We do not want to 226 * overlap at the kernel page size alignment. We do not want to
227 * free is_reserve_region() memory nor the UEFI memmap itself. 227 * free is_reserve_region() memory nor the UEFI memmap itself.
228 * 228 *
229 * The memory map is sorted, so we keep track of the end of 229 * The memory map is sorted, so we keep track of the end of
230 * any previous region we want to keep, remember any region 230 * any previous region we want to keep, remember any region
231 * we want to free and defer freeing it until we encounter 231 * we want to free and defer freeing it until we encounter
232 * the next region we want to keep. This way, before freeing 232 * the next region we want to keep. This way, before freeing
233 * it, we can clip it as needed to avoid freeing memory we 233 * it, we can clip it as needed to avoid freeing memory we
234 * want to keep for UEFI. 234 * want to keep for UEFI.
235 */ 235 */
236 236
237 keep_end = 0; 237 keep_end = 0;
238 free_start = 0; 238 free_start = 0;
239 239
240 for_each_efi_memory_desc(&memmap, md) { 240 for_each_efi_memory_desc(&memmap, md) {
241 u64 paddr, npages, size; 241 u64 paddr, npages, size;
242 242
243 if (is_reserve_region(md)) { 243 if (is_reserve_region(md)) {
244 /* 244 /*
245 * We don't want to free any memory from this region. 245 * We don't want to free any memory from this region.
246 */ 246 */
247 if (free_start) { 247 if (free_start) {
248 /* adjust free_end then free region */ 248 /* adjust free_end then free region */
249 if (free_end > md->phys_addr) 249 if (free_end > md->phys_addr)
250 free_end -= PAGE_SIZE; 250 free_end -= PAGE_SIZE;
251 total_freed += free_region(free_start, free_end); 251 total_freed += free_region(free_start, free_end);
252 free_start = 0; 252 free_start = 0;
253 } 253 }
254 keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); 254 keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
255 continue; 255 continue;
256 } 256 }
257 257
258 if (md->type != EFI_BOOT_SERVICES_CODE && 258 if (md->type != EFI_BOOT_SERVICES_CODE &&
259 md->type != EFI_BOOT_SERVICES_DATA) { 259 md->type != EFI_BOOT_SERVICES_DATA) {
260 /* no need to free this region */ 260 /* no need to free this region */
261 continue; 261 continue;
262 } 262 }
263 263
264 /* 264 /*
265 * We want to free memory from this region. 265 * We want to free memory from this region.
266 */ 266 */
267 paddr = md->phys_addr; 267 paddr = md->phys_addr;
268 npages = md->num_pages; 268 npages = md->num_pages;
269 memrange_efi_to_native(&paddr, &npages); 269 memrange_efi_to_native(&paddr, &npages);
270 size = npages << PAGE_SHIFT; 270 size = npages << PAGE_SHIFT;
271 271
272 if (free_start) { 272 if (free_start) {
273 if (paddr <= free_end) 273 if (paddr <= free_end)
274 free_end = paddr + size; 274 free_end = paddr + size;
275 else { 275 else {
276 total_freed += free_region(free_start, free_end); 276 total_freed += free_region(free_start, free_end);
277 free_start = paddr; 277 free_start = paddr;
278 free_end = paddr + size; 278 free_end = paddr + size;
279 } 279 }
280 } else { 280 } else {
281 free_start = paddr; 281 free_start = paddr;
282 free_end = paddr + size; 282 free_end = paddr + size;
283 } 283 }
284 if (free_start < keep_end) { 284 if (free_start < keep_end) {
285 free_start += PAGE_SIZE; 285 free_start += PAGE_SIZE;
286 if (free_start >= free_end) 286 if (free_start >= free_end)
287 free_start = 0; 287 free_start = 0;
288 } 288 }
289 } 289 }
290 if (free_start) 290 if (free_start)
291 total_freed += free_region(free_start, free_end); 291 total_freed += free_region(free_start, free_end);
292 292
293 if (total_freed) 293 if (total_freed)
294 pr_info("Freed 0x%llx bytes of EFI boot services memory", 294 pr_info("Freed 0x%llx bytes of EFI boot services memory",
295 total_freed); 295 total_freed);
296 } 296 }
297 297
298 void __init efi_init(void) 298 void __init efi_init(void)
299 { 299 {
300 struct efi_fdt_params params; 300 struct efi_fdt_params params;
301 301
302 /* Grab UEFI information placed in FDT by stub */ 302 /* Grab UEFI information placed in FDT by stub */
303 if (!efi_get_fdt_params(&params, uefi_debug)) 303 if (!efi_get_fdt_params(&params, uefi_debug))
304 return; 304 return;
305 305
306 efi_system_table = params.system_table; 306 efi_system_table = params.system_table;
307 307
308 memblock_reserve(params.mmap & PAGE_MASK, 308 memblock_reserve(params.mmap & PAGE_MASK,
309 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); 309 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
310 memmap.phys_map = (void *)params.mmap; 310 memmap.phys_map = (void *)params.mmap;
311 memmap.map = early_memremap(params.mmap, params.mmap_size); 311 memmap.map = early_memremap(params.mmap, params.mmap_size);
312 memmap.map_end = memmap.map + params.mmap_size; 312 memmap.map_end = memmap.map + params.mmap_size;
313 memmap.desc_size = params.desc_size; 313 memmap.desc_size = params.desc_size;
314 memmap.desc_version = params.desc_ver; 314 memmap.desc_version = params.desc_ver;
315 315
316 if (uefi_init() < 0) 316 if (uefi_init() < 0)
317 return; 317 return;
318 318
319 reserve_regions(); 319 reserve_regions();
320 } 320 }
321 321
322 void __init efi_idmap_init(void) 322 void __init efi_idmap_init(void)
323 { 323 {
324 if (!efi_enabled(EFI_BOOT)) 324 if (!efi_enabled(EFI_BOOT))
325 return; 325 return;
326 326
327 /* boot time idmap_pg_dir is incomplete, so fill in missing parts */ 327 /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
328 efi_setup_idmap(); 328 efi_setup_idmap();
329 early_memunmap(memmap.map, memmap.map_end - memmap.map);
329 } 330 }
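The early_memunmap() added above (new line 329) releases the early
fixmap-based mapping of the UEFI memory map as soon as the idmap has been
completed; the matching call that this commit removes from
arm64_enter_virtual_mode() (old line 383 below) ran much later, from an
early_initcall. Unmapping here keeps the early-ioremap slots free for the
rest of setup_arch() and pairs with the early_ioremap_reset() call added in
setup.c (new line 405 below).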
330 331
331 static int __init remap_region(efi_memory_desc_t *md, void **new) 332 static int __init remap_region(efi_memory_desc_t *md, void **new)
332 { 333 {
333 u64 paddr, vaddr, npages, size; 334 u64 paddr, vaddr, npages, size;
334 335
335 paddr = md->phys_addr; 336 paddr = md->phys_addr;
336 npages = md->num_pages; 337 npages = md->num_pages;
337 memrange_efi_to_native(&paddr, &npages); 338 memrange_efi_to_native(&paddr, &npages);
338 size = npages << PAGE_SHIFT; 339 size = npages << PAGE_SHIFT;
339 340
340 if (is_normal_ram(md)) 341 if (is_normal_ram(md))
341 vaddr = (__force u64)ioremap_cache(paddr, size); 342 vaddr = (__force u64)ioremap_cache(paddr, size);
342 else 343 else
343 vaddr = (__force u64)ioremap(paddr, size); 344 vaddr = (__force u64)ioremap(paddr, size);
344 345
345 if (!vaddr) { 346 if (!vaddr) {
346 pr_err("Unable to remap 0x%llx pages @ %p\n", 347 pr_err("Unable to remap 0x%llx pages @ %p\n",
347 npages, (void *)paddr); 348 npages, (void *)paddr);
348 return 0; 349 return 0;
349 } 350 }
350 351
351 /* adjust for any rounding when the EFI and system page sizes differ */ 352 /* adjust for any rounding when the EFI and system page sizes differ */
352 md->virt_addr = vaddr + (md->phys_addr - paddr); 353 md->virt_addr = vaddr + (md->phys_addr - paddr);
353 354
354 if (uefi_debug) 355 if (uefi_debug)
355 pr_info(" EFI remap 0x%012llx => %p\n", 356 pr_info(" EFI remap 0x%012llx => %p\n",
356 md->phys_addr, (void *)md->virt_addr); 357 md->phys_addr, (void *)md->virt_addr);
357 358
358 memcpy(*new, md, memmap.desc_size); 359 memcpy(*new, md, memmap.desc_size);
359 *new += memmap.desc_size; 360 *new += memmap.desc_size;
360 361
361 return 1; 362 return 1;
362 } 363 }
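UEFI always describes regions in 4 KiB EFI pages, while the kernel may run
with a larger native page size, so the range is widened to native page
boundaries before remapping, and md->virt_addr then restores the offset lost
by rounding paddr down. A sketch of the conversion assumed here (the real
helper is defined earlier in this file, outside this hunk):

	/* widen [addr, addr + npages * 4K) to native page boundaries */
	static void memrange_efi_to_native(u64 *addr, u64 *npages)
	{
		*npages = PFN_UP(*addr + (*npages << EFI_PAGE_SHIFT)) -
			  PFN_DOWN(*addr);
		*addr &= PAGE_MASK;
	}

With 64 KiB kernel pages, for instance, a region starting 0x3000 bytes into
a native page is mapped from the containing 64 KiB boundary, and the
assignment above sets md->virt_addr = vaddr + 0x3000.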
363 364
364 /* 365 /*
365 * Switch UEFI from an identity map to a kernel virtual map 366 * Switch UEFI from an identity map to a kernel virtual map
366 */ 367 */
367 static int __init arm64_enter_virtual_mode(void) 368 static int __init arm64_enter_virtual_mode(void)
368 { 369 {
369 efi_memory_desc_t *md; 370 efi_memory_desc_t *md;
370 phys_addr_t virtmap_phys; 371 phys_addr_t virtmap_phys;
371 void *virtmap, *virt_md; 372 void *virtmap, *virt_md;
372 efi_status_t status; 373 efi_status_t status;
373 u64 mapsize; 374 u64 mapsize;
374 int count = 0; 375 int count = 0;
375 unsigned long flags; 376 unsigned long flags;
376 377
377 if (!efi_enabled(EFI_BOOT)) { 378 if (!efi_enabled(EFI_BOOT)) {
378 pr_info("EFI services will not be available.\n"); 379 pr_info("EFI services will not be available.\n");
379 return -1; 380 return -1;
380 } 381 }
381 382
382 mapsize = memmap.map_end - memmap.map; 383 mapsize = memmap.map_end - memmap.map;
383 early_memunmap(memmap.map, mapsize);
384 384
385 if (efi_runtime_disabled()) { 385 if (efi_runtime_disabled()) {
386 pr_info("EFI runtime services will be disabled.\n"); 386 pr_info("EFI runtime services will be disabled.\n");
387 return -1; 387 return -1;
388 } 388 }
389 389
390 pr_info("Remapping and enabling EFI services.\n"); 390 pr_info("Remapping and enabling EFI services.\n");
391 /* replace early memmap mapping with permanent mapping */ 391 /* replace early memmap mapping with permanent mapping */
392 memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map, 392 memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
393 mapsize); 393 mapsize);
394 memmap.map_end = memmap.map + mapsize; 394 memmap.map_end = memmap.map + mapsize;
395 395
396 efi.memmap = &memmap; 396 efi.memmap = &memmap;
397 397
398 /* Map the runtime regions */ 398 /* Map the runtime regions */
399 virtmap = kmalloc(mapsize, GFP_KERNEL); 399 virtmap = kmalloc(mapsize, GFP_KERNEL);
400 if (!virtmap) { 400 if (!virtmap) {
401 pr_err("Failed to allocate EFI virtual memmap\n"); 401 pr_err("Failed to allocate EFI virtual memmap\n");
402 return -1; 402 return -1;
403 } 403 }
404 virtmap_phys = virt_to_phys(virtmap); 404 virtmap_phys = virt_to_phys(virtmap);
405 virt_md = virtmap; 405 virt_md = virtmap;
406 406
407 for_each_efi_memory_desc(&memmap, md) { 407 for_each_efi_memory_desc(&memmap, md) {
408 if (!(md->attribute & EFI_MEMORY_RUNTIME)) 408 if (!(md->attribute & EFI_MEMORY_RUNTIME))
409 continue; 409 continue;
410 if (!remap_region(md, &virt_md)) 410 if (!remap_region(md, &virt_md))
411 goto err_unmap; 411 goto err_unmap;
412 ++count; 412 ++count;
413 } 413 }
414 414
415 efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table); 415 efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
416 if (!efi.systab) { 416 if (!efi.systab) {
417 /* 417 /*
418 * If we have no virtual mapping for the System Table at this 418 * If we have no virtual mapping for the System Table at this
419 * point, the memory map doesn't cover the physical offset where 419 * point, the memory map doesn't cover the physical offset where
420 * it resides. This means the System Table will be inaccessible 420 * it resides. This means the System Table will be inaccessible
421 * to Runtime Services themselves once the virtual mapping is 421 * to Runtime Services themselves once the virtual mapping is
422 * installed. 422 * installed.
423 */ 423 */
424 pr_err("Failed to remap EFI System Table -- buggy firmware?\n"); 424 pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
425 goto err_unmap; 425 goto err_unmap;
426 } 426 }
427 set_bit(EFI_SYSTEM_TABLES, &efi.flags); 427 set_bit(EFI_SYSTEM_TABLES, &efi.flags);
428 428
429 local_irq_save(flags); 429 local_irq_save(flags);
430 cpu_switch_mm(idmap_pg_dir, &init_mm); 430 cpu_switch_mm(idmap_pg_dir, &init_mm);
431 431
432 /* Call SetVirtualAddressMap with the physical address of the map */ 432 /* Call SetVirtualAddressMap with the physical address of the map */
433 runtime = efi.systab->runtime; 433 runtime = efi.systab->runtime;
434 efi.set_virtual_address_map = runtime->set_virtual_address_map; 434 efi.set_virtual_address_map = runtime->set_virtual_address_map;
435 435
436 status = efi.set_virtual_address_map(count * memmap.desc_size, 436 status = efi.set_virtual_address_map(count * memmap.desc_size,
437 memmap.desc_size, 437 memmap.desc_size,
438 memmap.desc_version, 438 memmap.desc_version,
439 (efi_memory_desc_t *)virtmap_phys); 439 (efi_memory_desc_t *)virtmap_phys);
440 cpu_set_reserved_ttbr0(); 440 cpu_set_reserved_ttbr0();
441 flush_tlb_all(); 441 flush_tlb_all();
442 local_irq_restore(flags); 442 local_irq_restore(flags);
443 443
444 kfree(virtmap); 444 kfree(virtmap);
445 445
446 free_boot_services(); 446 free_boot_services();
447 447
448 if (status != EFI_SUCCESS) { 448 if (status != EFI_SUCCESS) {
449 pr_err("Failed to set EFI virtual address map! [%lx]\n", 449 pr_err("Failed to set EFI virtual address map! [%lx]\n",
450 status); 450 status);
451 return -1; 451 return -1;
452 } 452 }
453 453
454 /* Set up runtime services function pointers */ 454 /* Set up runtime services function pointers */
455 runtime = efi.systab->runtime; 455 runtime = efi.systab->runtime;
456 efi_native_runtime_setup(); 456 efi_native_runtime_setup();
457 set_bit(EFI_RUNTIME_SERVICES, &efi.flags); 457 set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
458 458
459 efi.runtime_version = efi.systab->hdr.revision; 459 efi.runtime_version = efi.systab->hdr.revision;
460 460
461 return 0; 461 return 0;
462 462
463 err_unmap: 463 err_unmap:
464 /* unmap all mappings that succeeded: there are 'count' of those */ 464 /* unmap all mappings that succeeded: there are 'count' of those */
465 for (virt_md = virtmap; count--; virt_md += memmap.desc_size) { 465 for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
466 md = virt_md; 466 md = virt_md;
467 iounmap((__force void __iomem *)md->virt_addr); 467 iounmap((__force void __iomem *)md->virt_addr);
468 } 468 }
469 kfree(virtmap); 469 kfree(virtmap);
470 return -1; 470 return -1;
471 } 471 }
472 early_initcall(arm64_enter_virtual_mode); 472 early_initcall(arm64_enter_virtual_mode);
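Two details of the sequence above are easy to miss. First, the virtual map
is handed to SetVirtualAddressMap() by physical address (virtmap_phys): the
firmware is entered under the identity map installed by
cpu_switch_mm(idmap_pg_dir, ...), where physical and virtual addresses
coincide. Second, EFI memory descriptors are spaced desc_size bytes apart,
and desc_size may exceed sizeof(efi_memory_desc_t); that is why virt_md
advances by memmap.desc_size and the map size is count * memmap.desc_size.
An illustrative walk using the same byte-stride convention:

	void *p;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		efi_memory_desc_t *md = p;
		/* inspect md->type, md->attribute, md->phys_addr, ... */
	}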
473 473
474 static int __init arm64_dmi_init(void) 474 static int __init arm64_dmi_init(void)
475 { 475 {
476 /* 476 /*
477 * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to 477 * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to
478 * be called early because dmi_id_init(), which is an arch_initcall 478 * be called early because dmi_id_init(), which is an arch_initcall
479 * itself, depends on dmi_scan_machine() having been called already. 479 * itself, depends on dmi_scan_machine() having been called already.
480 */ 480 */
481 dmi_scan_machine(); 481 dmi_scan_machine();
482 if (dmi_available) 482 if (dmi_available)
483 dmi_set_dump_stack_arch_desc(); 483 dmi_set_dump_stack_arch_desc();
484 return 0; 484 return 0;
485 } 485 }
486 core_initcall(arm64_dmi_init); 486 core_initcall(arm64_dmi_init);
arch/arm64/kernel/module.c
1 /* 1 /*
2 * AArch64 loadable module support. 2 * AArch64 loadable module support.
3 * 3 *
4 * Copyright (C) 2012 ARM Limited 4 * Copyright (C) 2012 ARM Limited
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * This program is distributed in the hope that it will be useful, 10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 * 17 *
18 * Author: Will Deacon <will.deacon@arm.com> 18 * Author: Will Deacon <will.deacon@arm.com>
19 */ 19 */
20 20
21 #include <linux/bitops.h> 21 #include <linux/bitops.h>
22 #include <linux/elf.h> 22 #include <linux/elf.h>
23 #include <linux/gfp.h> 23 #include <linux/gfp.h>
24 #include <linux/kernel.h> 24 #include <linux/kernel.h>
25 #include <linux/mm.h> 25 #include <linux/mm.h>
26 #include <linux/moduleloader.h> 26 #include <linux/moduleloader.h>
27 #include <linux/vmalloc.h> 27 #include <linux/vmalloc.h>
28 #include <asm/alternative.h>
28 #include <asm/insn.h> 29 #include <asm/insn.h>
29 #include <asm/sections.h> 30 #include <asm/sections.h>
30 31
31 #define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX 32 #define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
32 #define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16 33 #define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
33 34
34 void *module_alloc(unsigned long size) 35 void *module_alloc(unsigned long size)
35 { 36 {
36 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, 37 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
37 GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE, 38 GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
38 __builtin_return_address(0)); 39 __builtin_return_address(0));
39 } 40 }
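module_alloc() confines modules to [MODULES_VADDR, MODULES_END), a window
placed near the kernel image. The reason is branch reach: B and BL encode a
signed 26-bit word offset, so they span at most 2^25 instructions x 4 bytes
= 2^27 bytes = 128 MiB in either direction, and keeping modules within that
radius lets the R_AARCH64_JUMP26 and R_AARCH64_CALL26 relocations handled
below resolve without veneers.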
40 41
41 enum aarch64_reloc_op { 42 enum aarch64_reloc_op {
42 RELOC_OP_NONE, 43 RELOC_OP_NONE,
43 RELOC_OP_ABS, 44 RELOC_OP_ABS,
44 RELOC_OP_PREL, 45 RELOC_OP_PREL,
45 RELOC_OP_PAGE, 46 RELOC_OP_PAGE,
46 }; 47 };
47 48
48 static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val) 49 static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
49 { 50 {
50 switch (reloc_op) { 51 switch (reloc_op) {
51 case RELOC_OP_ABS: 52 case RELOC_OP_ABS:
52 return val; 53 return val;
53 case RELOC_OP_PREL: 54 case RELOC_OP_PREL:
54 return val - (u64)place; 55 return val - (u64)place;
55 case RELOC_OP_PAGE: 56 case RELOC_OP_PAGE:
56 return (val & ~0xfff) - ((u64)place & ~0xfff); 57 return (val & ~0xfff) - ((u64)place & ~0xfff);
57 case RELOC_OP_NONE: 58 case RELOC_OP_NONE:
58 return 0; 59 return 0;
59 } 60 }
60 61
61 pr_err("do_reloc: unknown relocation operation %d\n", reloc_op); 62 pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
62 return 0; 63 return 0;
63 } 64 }
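RELOC_OP_PAGE computes the page-relative offset consumed by ADRP: both the
target and the place are truncated to their 4 KiB page before subtracting.
A worked example with hypothetical addresses:

	/*
	 * val   = 0xffff000000123456 -> val   & ~0xfff = 0xffff000000123000
	 * place = 0xffff000000089abc -> place & ~0xfff = 0xffff000000089000
	 * result = 0x9a000, the page offset encoded into ADRP's immediate
	 */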
64 65
65 static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) 66 static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
66 { 67 {
67 u64 imm_mask = (1 << len) - 1; 68 u64 imm_mask = (1 << len) - 1;
68 s64 sval = do_reloc(op, place, val); 69 s64 sval = do_reloc(op, place, val);
69 70
70 switch (len) { 71 switch (len) {
71 case 16: 72 case 16:
72 *(s16 *)place = sval; 73 *(s16 *)place = sval;
73 break; 74 break;
74 case 32: 75 case 32:
75 *(s32 *)place = sval; 76 *(s32 *)place = sval;
76 break; 77 break;
77 case 64: 78 case 64:
78 *(s64 *)place = sval; 79 *(s64 *)place = sval;
79 break; 80 break;
80 default: 81 default:
81 pr_err("Invalid length (%d) for data relocation\n", len); 82 pr_err("Invalid length (%d) for data relocation\n", len);
82 return 0; 83 return 0;
83 } 84 }
84 85
85 /* 86 /*
86 * Extract the upper value bits (including the sign bit) and 87 * Extract the upper value bits (including the sign bit) and
87 * shift them to bit 0. 88 * shift them to bit 0.
88 */ 89 */
89 sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1); 90 sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
90 91
91 /* 92 /*
92 * Overflow has occurred if the value is not representable in 93 * Overflow has occurred if the value is not representable in
93 * len bits (i.e. the bottom len bits are not sign-extended and 94 * len bits (i.e. the bottom len bits are not sign-extended and
94 * the top bits are not all zero). 95 * the top bits are not all zero).
95 */ 96 */
96 if ((u64)(sval + 1) > 2) 97 if ((u64)(sval + 1) > 2)
97 return -ERANGE; 98 return -ERANGE;
98 99
99 return 0; 100 return 0;
100 } 101 }
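After the masked arithmetic shift, sval holds only the bits above the
relocated field, sign bit included, so (u64)(sval + 1) > 2 admits the values
0, -1 and 1: the field is accepted if it sign-extends cleanly or if it fits
as a len-bit unsigned quantity. Worked out for len == 16 (imm_mask ==
0xffff):

	/*
	 * value  0x7fff: (v & ~0x7fff) >> 15 ==  0, +1 == 1 -> accepted
	 * value -0x8000: (v & ~0x7fff) >> 15 == -1, +1 == 0 -> accepted
	 * value  0x8000: (v & ~0x7fff) >> 15 ==  1, +1 == 2 -> accepted (u16)
	 * value 0x10000: (v & ~0x7fff) >> 15 ==  2, +1 == 3 -> -ERANGE
	 *
	 * i.e. the accepted range is [-2^15, 2^16). Contrast
	 * reloc_insn_imm() below, whose >= 2 test is signed-only.
	 */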
101 102
102 static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, 103 static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
103 int lsb, enum aarch64_insn_imm_type imm_type) 104 int lsb, enum aarch64_insn_imm_type imm_type)
104 { 105 {
105 u64 imm, limit = 0; 106 u64 imm, limit = 0;
106 s64 sval; 107 s64 sval;
107 u32 insn = le32_to_cpu(*(u32 *)place); 108 u32 insn = le32_to_cpu(*(u32 *)place);
108 109
109 sval = do_reloc(op, place, val); 110 sval = do_reloc(op, place, val);
110 sval >>= lsb; 111 sval >>= lsb;
111 imm = sval & 0xffff; 112 imm = sval & 0xffff;
112 113
113 if (imm_type == AARCH64_INSN_IMM_MOVNZ) { 114 if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
114 /* 115 /*
115 * For signed MOVW relocations, we have to manipulate the 116 * For signed MOVW relocations, we have to manipulate the
116 * instruction encoding depending on whether or not the 117 * instruction encoding depending on whether or not the
117 * immediate is less than zero. 118 * immediate is less than zero.
118 */ 119 */
119 insn &= ~(3 << 29); 120 insn &= ~(3 << 29);
120 if (sval >= 0) { 121 if (sval >= 0) {
121 /* >=0: Set the instruction to MOVZ (opcode 10b). */ 122 /* >=0: Set the instruction to MOVZ (opcode 10b). */
122 insn |= 2 << 29; 123 insn |= 2 << 29;
123 } else { 124 } else {
124 /* 125 /*
125 * <0: Set the instruction to MOVN (opcode 00b). 126 * <0: Set the instruction to MOVN (opcode 00b).
126 * Since we've masked the opcode already, we 127 * Since we've masked the opcode already, we
127 * don't need to do anything other than 128 * don't need to do anything other than
128 * inverting the new immediate field. 129 * inverting the new immediate field.
129 */ 130 */
130 imm = ~imm; 131 imm = ~imm;
131 } 132 }
132 imm_type = AARCH64_INSN_IMM_MOVK; 133 imm_type = AARCH64_INSN_IMM_MOVK;
133 } 134 }
134 135
135 /* Update the instruction with the new encoding. */ 136 /* Update the instruction with the new encoding. */
136 insn = aarch64_insn_encode_immediate(imm_type, insn, imm); 137 insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
137 *(u32 *)place = cpu_to_le32(insn); 138 *(u32 *)place = cpu_to_le32(insn);
138 139
139 /* Shift out the immediate field. */ 140 /* Shift out the immediate field. */
140 sval >>= 16; 141 sval >>= 16;
141 142
142 /* 143 /*
143 * For unsigned immediates, the overflow check is straightforward. 144 * For unsigned immediates, the overflow check is straightforward.
144 * For signed immediates, the sign bit is actually the bit past the 145 * For signed immediates, the sign bit is actually the bit past the
145 * most significant bit of the field. 146 * most significant bit of the field.
146 * The AARCH64_INSN_IMM_16 immediate type is unsigned. 147 * The AARCH64_INSN_IMM_16 immediate type is unsigned.
147 */ 148 */
148 if (imm_type != AARCH64_INSN_IMM_16) { 149 if (imm_type != AARCH64_INSN_IMM_16) {
149 sval++; 150 sval++;
150 limit++; 151 limit++;
151 } 152 }
152 153
153 /* Check the upper bits depending on the sign of the immediate. */ 154 /* Check the upper bits depending on the sign of the immediate. */
154 if ((u64)sval > limit) 155 if ((u64)sval > limit)
155 return -ERANGE; 156 return -ERANGE;
156 157
157 return 0; 158 return 0;
158 } 159 }
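For the AARCH64_INSN_IMM_MOVNZ cases (the signed MOVW_SABS/MOVW_PREL
groups), the opcode is rewritten to MOVZ for non-negative values and to MOVN
for negative ones; since MOVN places the bitwise inverse of its immediate in
the register, the new immediate is inverted to match. (Note the sign test is
applied to sval: imm has already been masked to 16 bits and can never read
as negative.) A worked example with a hypothetical target value:

	/*
	 * R_AARCH64_MOVW_SABS_G0, value -3:
	 *   sval = -3, imm = sval & 0xffff = 0xfffd
	 *   sval < 0 -> opcode MOVN, imm = ~imm (low 16 bits: 0x0002)
	 *   MOVN Xd, #0x2 writes ~0x2 == -3, as required
	 * The check that follows (sval >>= 16; sval++) then verifies that
	 * everything above the G0 chunk was just sign extension.
	 */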
159 160
160 static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val, 161 static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
161 int lsb, int len, enum aarch64_insn_imm_type imm_type) 162 int lsb, int len, enum aarch64_insn_imm_type imm_type)
162 { 163 {
163 u64 imm, imm_mask; 164 u64 imm, imm_mask;
164 s64 sval; 165 s64 sval;
165 u32 insn = le32_to_cpu(*(u32 *)place); 166 u32 insn = le32_to_cpu(*(u32 *)place);
166 167
167 /* Calculate the relocation value. */ 168 /* Calculate the relocation value. */
168 sval = do_reloc(op, place, val); 169 sval = do_reloc(op, place, val);
169 sval >>= lsb; 170 sval >>= lsb;
170 171
171 /* Extract the value bits and shift them to bit 0. */ 172 /* Extract the value bits and shift them to bit 0. */
172 imm_mask = (BIT(lsb + len) - 1) >> lsb; 173 imm_mask = (BIT(lsb + len) - 1) >> lsb;
173 imm = sval & imm_mask; 174 imm = sval & imm_mask;
174 175
175 /* Update the instruction's immediate field. */ 176 /* Update the instruction's immediate field. */
176 insn = aarch64_insn_encode_immediate(imm_type, insn, imm); 177 insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
177 *(u32 *)place = cpu_to_le32(insn); 178 *(u32 *)place = cpu_to_le32(insn);
178 179
179 /* 180 /*
180 * Extract the upper value bits (including the sign bit) and 181 * Extract the upper value bits (including the sign bit) and
181 * shift them to bit 0. 182 * shift them to bit 0.
182 */ 183 */
183 sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1); 184 sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
184 185
185 /* 186 /*
186 * Overflow has occurred if the upper bits are not all equal to 187 * Overflow has occurred if the upper bits are not all equal to
187 * the sign bit of the value. 188 * the sign bit of the value.
188 */ 189 */
189 if ((u64)(sval + 1) >= 2) 190 if ((u64)(sval + 1) >= 2)
190 return -ERANGE; 191 return -ERANGE;
191 192
192 return 0; 193 return 0;
193 } 194 }
194 195
195 int apply_relocate_add(Elf64_Shdr *sechdrs, 196 int apply_relocate_add(Elf64_Shdr *sechdrs,
196 const char *strtab, 197 const char *strtab,
197 unsigned int symindex, 198 unsigned int symindex,
198 unsigned int relsec, 199 unsigned int relsec,
199 struct module *me) 200 struct module *me)
200 { 201 {
201 unsigned int i; 202 unsigned int i;
202 int ovf; 203 int ovf;
203 bool overflow_check; 204 bool overflow_check;
204 Elf64_Sym *sym; 205 Elf64_Sym *sym;
205 void *loc; 206 void *loc;
206 u64 val; 207 u64 val;
207 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; 208 Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
208 209
209 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 210 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
210 /* loc corresponds to P in the AArch64 ELF document. */ 211 /* loc corresponds to P in the AArch64 ELF document. */
211 loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr 212 loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
212 + rel[i].r_offset; 213 + rel[i].r_offset;
213 214
214 /* sym is the ELF symbol we're referring to. */ 215 /* sym is the ELF symbol we're referring to. */
215 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr 216 sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
216 + ELF64_R_SYM(rel[i].r_info); 217 + ELF64_R_SYM(rel[i].r_info);
217 218
218 /* val corresponds to (S + A) in the AArch64 ELF document. */ 219 /* val corresponds to (S + A) in the AArch64 ELF document. */
219 val = sym->st_value + rel[i].r_addend; 220 val = sym->st_value + rel[i].r_addend;
220 221
221 /* Check for overflow by default. */ 222 /* Check for overflow by default. */
222 overflow_check = true; 223 overflow_check = true;
223 224
224 /* Perform the static relocation. */ 225 /* Perform the static relocation. */
225 switch (ELF64_R_TYPE(rel[i].r_info)) { 226 switch (ELF64_R_TYPE(rel[i].r_info)) {
226 /* Null relocations. */ 227 /* Null relocations. */
227 case R_ARM_NONE: 228 case R_ARM_NONE:
228 case R_AARCH64_NONE: 229 case R_AARCH64_NONE:
229 ovf = 0; 230 ovf = 0;
230 break; 231 break;
231 232
232 /* Data relocations. */ 233 /* Data relocations. */
233 case R_AARCH64_ABS64: 234 case R_AARCH64_ABS64:
234 overflow_check = false; 235 overflow_check = false;
235 ovf = reloc_data(RELOC_OP_ABS, loc, val, 64); 236 ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
236 break; 237 break;
237 case R_AARCH64_ABS32: 238 case R_AARCH64_ABS32:
238 ovf = reloc_data(RELOC_OP_ABS, loc, val, 32); 239 ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
239 break; 240 break;
240 case R_AARCH64_ABS16: 241 case R_AARCH64_ABS16:
241 ovf = reloc_data(RELOC_OP_ABS, loc, val, 16); 242 ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
242 break; 243 break;
243 case R_AARCH64_PREL64: 244 case R_AARCH64_PREL64:
244 overflow_check = false; 245 overflow_check = false;
245 ovf = reloc_data(RELOC_OP_PREL, loc, val, 64); 246 ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
246 break; 247 break;
247 case R_AARCH64_PREL32: 248 case R_AARCH64_PREL32:
248 ovf = reloc_data(RELOC_OP_PREL, loc, val, 32); 249 ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
249 break; 250 break;
250 case R_AARCH64_PREL16: 251 case R_AARCH64_PREL16:
251 ovf = reloc_data(RELOC_OP_PREL, loc, val, 16); 252 ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
252 break; 253 break;
253 254
254 /* MOVW instruction relocations. */ 255 /* MOVW instruction relocations. */
255 case R_AARCH64_MOVW_UABS_G0_NC: 256 case R_AARCH64_MOVW_UABS_G0_NC:
256 overflow_check = false; 257 overflow_check = false;
257 case R_AARCH64_MOVW_UABS_G0: 258 case R_AARCH64_MOVW_UABS_G0:
258 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, 259 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
259 AARCH64_INSN_IMM_16); 260 AARCH64_INSN_IMM_16);
260 break; 261 break;
261 case R_AARCH64_MOVW_UABS_G1_NC: 262 case R_AARCH64_MOVW_UABS_G1_NC:
262 overflow_check = false; 263 overflow_check = false;
263 case R_AARCH64_MOVW_UABS_G1: 264 case R_AARCH64_MOVW_UABS_G1:
264 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, 265 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
265 AARCH64_INSN_IMM_16); 266 AARCH64_INSN_IMM_16);
266 break; 267 break;
267 case R_AARCH64_MOVW_UABS_G2_NC: 268 case R_AARCH64_MOVW_UABS_G2_NC:
268 overflow_check = false; 269 overflow_check = false;
269 case R_AARCH64_MOVW_UABS_G2: 270 case R_AARCH64_MOVW_UABS_G2:
270 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, 271 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
271 AARCH64_INSN_IMM_16); 272 AARCH64_INSN_IMM_16);
272 break; 273 break;
273 case R_AARCH64_MOVW_UABS_G3: 274 case R_AARCH64_MOVW_UABS_G3:
274 /* We're using the top bits so we can't overflow. */ 275 /* We're using the top bits so we can't overflow. */
275 overflow_check = false; 276 overflow_check = false;
276 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48, 277 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
277 AARCH64_INSN_IMM_16); 278 AARCH64_INSN_IMM_16);
278 break; 279 break;
279 case R_AARCH64_MOVW_SABS_G0: 280 case R_AARCH64_MOVW_SABS_G0:
280 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, 281 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
281 AARCH64_INSN_IMM_MOVNZ); 282 AARCH64_INSN_IMM_MOVNZ);
282 break; 283 break;
283 case R_AARCH64_MOVW_SABS_G1: 284 case R_AARCH64_MOVW_SABS_G1:
284 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, 285 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
285 AARCH64_INSN_IMM_MOVNZ); 286 AARCH64_INSN_IMM_MOVNZ);
286 break; 287 break;
287 case R_AARCH64_MOVW_SABS_G2: 288 case R_AARCH64_MOVW_SABS_G2:
288 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, 289 ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
289 AARCH64_INSN_IMM_MOVNZ); 290 AARCH64_INSN_IMM_MOVNZ);
290 break; 291 break;
291 case R_AARCH64_MOVW_PREL_G0_NC: 292 case R_AARCH64_MOVW_PREL_G0_NC:
292 overflow_check = false; 293 overflow_check = false;
293 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, 294 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
294 AARCH64_INSN_IMM_MOVK); 295 AARCH64_INSN_IMM_MOVK);
295 break; 296 break;
296 case R_AARCH64_MOVW_PREL_G0: 297 case R_AARCH64_MOVW_PREL_G0:
297 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, 298 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
298 AARCH64_INSN_IMM_MOVNZ); 299 AARCH64_INSN_IMM_MOVNZ);
299 break; 300 break;
300 case R_AARCH64_MOVW_PREL_G1_NC: 301 case R_AARCH64_MOVW_PREL_G1_NC:
301 overflow_check = false; 302 overflow_check = false;
302 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, 303 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
303 AARCH64_INSN_IMM_MOVK); 304 AARCH64_INSN_IMM_MOVK);
304 break; 305 break;
305 case R_AARCH64_MOVW_PREL_G1: 306 case R_AARCH64_MOVW_PREL_G1:
306 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, 307 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
307 AARCH64_INSN_IMM_MOVNZ); 308 AARCH64_INSN_IMM_MOVNZ);
308 break; 309 break;
309 case R_AARCH64_MOVW_PREL_G2_NC: 310 case R_AARCH64_MOVW_PREL_G2_NC:
310 overflow_check = false; 311 overflow_check = false;
311 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, 312 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
312 AARCH64_INSN_IMM_MOVK); 313 AARCH64_INSN_IMM_MOVK);
313 break; 314 break;
314 case R_AARCH64_MOVW_PREL_G2: 315 case R_AARCH64_MOVW_PREL_G2:
315 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, 316 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
316 AARCH64_INSN_IMM_MOVNZ); 317 AARCH64_INSN_IMM_MOVNZ);
317 break; 318 break;
318 case R_AARCH64_MOVW_PREL_G3: 319 case R_AARCH64_MOVW_PREL_G3:
319 /* We're using the top bits so we can't overflow. */ 320 /* We're using the top bits so we can't overflow. */
320 overflow_check = false; 321 overflow_check = false;
321 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48, 322 ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
322 AARCH64_INSN_IMM_MOVNZ); 323 AARCH64_INSN_IMM_MOVNZ);
323 break; 324 break;
324 325
325 /* Immediate instruction relocations. */ 326 /* Immediate instruction relocations. */
326 case R_AARCH64_LD_PREL_LO19: 327 case R_AARCH64_LD_PREL_LO19:
327 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, 328 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
328 AARCH64_INSN_IMM_19); 329 AARCH64_INSN_IMM_19);
329 break; 330 break;
330 case R_AARCH64_ADR_PREL_LO21: 331 case R_AARCH64_ADR_PREL_LO21:
331 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, 332 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
332 AARCH64_INSN_IMM_ADR); 333 AARCH64_INSN_IMM_ADR);
333 break; 334 break;
334 case R_AARCH64_ADR_PREL_PG_HI21_NC: 335 case R_AARCH64_ADR_PREL_PG_HI21_NC:
335 overflow_check = false; 336 overflow_check = false;
336 case R_AARCH64_ADR_PREL_PG_HI21: 337 case R_AARCH64_ADR_PREL_PG_HI21:
337 ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, 338 ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
338 AARCH64_INSN_IMM_ADR); 339 AARCH64_INSN_IMM_ADR);
339 break; 340 break;
340 case R_AARCH64_ADD_ABS_LO12_NC: 341 case R_AARCH64_ADD_ABS_LO12_NC:
341 case R_AARCH64_LDST8_ABS_LO12_NC: 342 case R_AARCH64_LDST8_ABS_LO12_NC:
342 overflow_check = false; 343 overflow_check = false;
343 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12, 344 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
344 AARCH64_INSN_IMM_12); 345 AARCH64_INSN_IMM_12);
345 break; 346 break;
346 case R_AARCH64_LDST16_ABS_LO12_NC: 347 case R_AARCH64_LDST16_ABS_LO12_NC:
347 overflow_check = false; 348 overflow_check = false;
348 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11, 349 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
349 AARCH64_INSN_IMM_12); 350 AARCH64_INSN_IMM_12);
350 break; 351 break;
351 case R_AARCH64_LDST32_ABS_LO12_NC: 352 case R_AARCH64_LDST32_ABS_LO12_NC:
352 overflow_check = false; 353 overflow_check = false;
353 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10, 354 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
354 AARCH64_INSN_IMM_12); 355 AARCH64_INSN_IMM_12);
355 break; 356 break;
356 case R_AARCH64_LDST64_ABS_LO12_NC: 357 case R_AARCH64_LDST64_ABS_LO12_NC:
357 overflow_check = false; 358 overflow_check = false;
358 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9, 359 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
359 AARCH64_INSN_IMM_12); 360 AARCH64_INSN_IMM_12);
360 break; 361 break;
361 case R_AARCH64_LDST128_ABS_LO12_NC: 362 case R_AARCH64_LDST128_ABS_LO12_NC:
362 overflow_check = false; 363 overflow_check = false;
363 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8, 364 ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
364 AARCH64_INSN_IMM_12); 365 AARCH64_INSN_IMM_12);
365 break; 366 break;
366 case R_AARCH64_TSTBR14: 367 case R_AARCH64_TSTBR14:
367 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14, 368 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
368 AARCH64_INSN_IMM_14); 369 AARCH64_INSN_IMM_14);
369 break; 370 break;
370 case R_AARCH64_CONDBR19: 371 case R_AARCH64_CONDBR19:
371 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, 372 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
372 AARCH64_INSN_IMM_19); 373 AARCH64_INSN_IMM_19);
373 break; 374 break;
374 case R_AARCH64_JUMP26: 375 case R_AARCH64_JUMP26:
375 case R_AARCH64_CALL26: 376 case R_AARCH64_CALL26:
376 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26, 377 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
377 AARCH64_INSN_IMM_26); 378 AARCH64_INSN_IMM_26);
378 break; 379 break;
379 380
380 default: 381 default:
381 pr_err("module %s: unsupported RELA relocation: %llu\n", 382 pr_err("module %s: unsupported RELA relocation: %llu\n",
382 me->name, ELF64_R_TYPE(rel[i].r_info)); 383 me->name, ELF64_R_TYPE(rel[i].r_info));
383 return -ENOEXEC; 384 return -ENOEXEC;
384 } 385 }
385 386
386 if (overflow_check && ovf == -ERANGE) 387 if (overflow_check && ovf == -ERANGE)
387 goto overflow; 388 goto overflow;
388 389
389 } 390 }
390 391
391 return 0; 392 return 0;
392 393
393 overflow: 394 overflow:
394 pr_err("module %s: overflow in relocation type %d val %Lx\n", 395 pr_err("module %s: overflow in relocation type %d val %Lx\n",
395 me->name, (int)ELF64_R_TYPE(rel[i].r_info), val); 396 me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
396 return -ENOEXEC; 397 return -ENOEXEC;
397 } 398 }
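Note the deliberate fallthroughs in the switch above: each _NC ("no check")
relocation case only clears overflow_check and then falls into its checked
sibling, which performs the identical field update; the two variants differ
solely in whether -ERANGE is honoured by the overflow_check test after the
switch.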
398 399
399 int module_finalize(const Elf_Ehdr *hdr, 400 int module_finalize(const Elf_Ehdr *hdr,
400 const Elf_Shdr *sechdrs, 401 const Elf_Shdr *sechdrs,
401 struct module *me) 402 struct module *me)
402 { 403 {
403 const Elf_Shdr *s, *se; 404 const Elf_Shdr *s, *se;
404 const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; 405 const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
405 406
406 for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { 407 for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
407 if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) { 408 if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
408 apply_alternatives((void *)s->sh_addr, s->sh_size); 409 apply_alternatives((void *)s->sh_addr, s->sh_size);
409 return 0; 410 return 0;
410 } 411 }
411 } 412 }
412 413
413 return 0; 414 return 0;
414 } 415 }
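module_finalize() walks the module's section headers and, if an
.altinstructions section is present, patches in the instruction alternatives
it carries. apply_alternatives() is declared in <asm/alternative.h>, which
the hunk above now includes explicitly (new line 28) instead of relying on a
transitive include.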
415 416
arch/arm64/kernel/setup.c
1 /* 1 /*
2 * Based on arch/arm/kernel/setup.c 2 * Based on arch/arm/kernel/setup.c
3 * 3 *
4 * Copyright (C) 1995-2001 Russell King 4 * Copyright (C) 1995-2001 Russell King
5 * Copyright (C) 2012 ARM Ltd. 5 * Copyright (C) 2012 ARM Ltd.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20 #include <linux/export.h> 20 #include <linux/export.h>
21 #include <linux/kernel.h> 21 #include <linux/kernel.h>
22 #include <linux/stddef.h> 22 #include <linux/stddef.h>
23 #include <linux/ioport.h> 23 #include <linux/ioport.h>
24 #include <linux/delay.h> 24 #include <linux/delay.h>
25 #include <linux/utsname.h> 25 #include <linux/utsname.h>
26 #include <linux/initrd.h> 26 #include <linux/initrd.h>
27 #include <linux/console.h> 27 #include <linux/console.h>
28 #include <linux/cache.h> 28 #include <linux/cache.h>
29 #include <linux/bootmem.h> 29 #include <linux/bootmem.h>
30 #include <linux/seq_file.h> 30 #include <linux/seq_file.h>
31 #include <linux/screen_info.h> 31 #include <linux/screen_info.h>
32 #include <linux/init.h> 32 #include <linux/init.h>
33 #include <linux/kexec.h> 33 #include <linux/kexec.h>
34 #include <linux/crash_dump.h> 34 #include <linux/crash_dump.h>
35 #include <linux/root_dev.h> 35 #include <linux/root_dev.h>
36 #include <linux/clk-provider.h> 36 #include <linux/clk-provider.h>
37 #include <linux/cpu.h> 37 #include <linux/cpu.h>
38 #include <linux/interrupt.h> 38 #include <linux/interrupt.h>
39 #include <linux/smp.h> 39 #include <linux/smp.h>
40 #include <linux/fs.h> 40 #include <linux/fs.h>
41 #include <linux/proc_fs.h> 41 #include <linux/proc_fs.h>
42 #include <linux/memblock.h> 42 #include <linux/memblock.h>
43 #include <linux/of_fdt.h> 43 #include <linux/of_fdt.h>
44 #include <linux/of_platform.h> 44 #include <linux/of_platform.h>
45 #include <linux/efi.h> 45 #include <linux/efi.h>
46 #include <linux/personality.h> 46 #include <linux/personality.h>
47 47
48 #include <asm/fixmap.h> 48 #include <asm/fixmap.h>
49 #include <asm/cpu.h> 49 #include <asm/cpu.h>
50 #include <asm/cputype.h> 50 #include <asm/cputype.h>
51 #include <asm/elf.h> 51 #include <asm/elf.h>
52 #include <asm/cputable.h> 52 #include <asm/cputable.h>
53 #include <asm/cpufeature.h> 53 #include <asm/cpufeature.h>
54 #include <asm/cpu_ops.h> 54 #include <asm/cpu_ops.h>
55 #include <asm/sections.h> 55 #include <asm/sections.h>
56 #include <asm/setup.h> 56 #include <asm/setup.h>
57 #include <asm/smp_plat.h> 57 #include <asm/smp_plat.h>
58 #include <asm/cacheflush.h> 58 #include <asm/cacheflush.h>
59 #include <asm/tlbflush.h> 59 #include <asm/tlbflush.h>
60 #include <asm/traps.h> 60 #include <asm/traps.h>
61 #include <asm/memblock.h> 61 #include <asm/memblock.h>
62 #include <asm/psci.h> 62 #include <asm/psci.h>
63 #include <asm/efi.h> 63 #include <asm/efi.h>
64 64
65 unsigned int processor_id; 65 unsigned int processor_id;
66 EXPORT_SYMBOL(processor_id); 66 EXPORT_SYMBOL(processor_id);
67 67
68 unsigned long elf_hwcap __read_mostly; 68 unsigned long elf_hwcap __read_mostly;
69 EXPORT_SYMBOL_GPL(elf_hwcap); 69 EXPORT_SYMBOL_GPL(elf_hwcap);
70 70
71 #ifdef CONFIG_COMPAT 71 #ifdef CONFIG_COMPAT
72 #define COMPAT_ELF_HWCAP_DEFAULT \ 72 #define COMPAT_ELF_HWCAP_DEFAULT \
73 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ 73 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
74 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ 74 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
75 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ 75 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
76 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ 76 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
77 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\ 77 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
78 COMPAT_HWCAP_LPAE) 78 COMPAT_HWCAP_LPAE)
79 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; 79 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
80 unsigned int compat_elf_hwcap2 __read_mostly; 80 unsigned int compat_elf_hwcap2 __read_mostly;
81 #endif 81 #endif
82 82
83 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); 83 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
84 84
85 static const char *cpu_name; 85 static const char *cpu_name;
86 phys_addr_t __fdt_pointer __initdata; 86 phys_addr_t __fdt_pointer __initdata;
87 87
88 /* 88 /*
89 * Standard memory resources 89 * Standard memory resources
90 */ 90 */
91 static struct resource mem_res[] = { 91 static struct resource mem_res[] = {
92 { 92 {
93 .name = "Kernel code", 93 .name = "Kernel code",
94 .start = 0, 94 .start = 0,
95 .end = 0, 95 .end = 0,
96 .flags = IORESOURCE_MEM 96 .flags = IORESOURCE_MEM
97 }, 97 },
98 { 98 {
99 .name = "Kernel data", 99 .name = "Kernel data",
100 .start = 0, 100 .start = 0,
101 .end = 0, 101 .end = 0,
102 .flags = IORESOURCE_MEM 102 .flags = IORESOURCE_MEM
103 } 103 }
104 }; 104 };
105 105
106 #define kernel_code mem_res[0] 106 #define kernel_code mem_res[0]
107 #define kernel_data mem_res[1] 107 #define kernel_data mem_res[1]
108 108
109 void __init early_print(const char *str, ...) 109 void __init early_print(const char *str, ...)
110 { 110 {
111 char buf[256]; 111 char buf[256];
112 va_list ap; 112 va_list ap;
113 113
114 va_start(ap, str); 114 va_start(ap, str);
115 vsnprintf(buf, sizeof(buf), str, ap); 115 vsnprintf(buf, sizeof(buf), str, ap);
116 va_end(ap); 116 va_end(ap);
117 117
118 printk("%s", buf); 118 printk("%s", buf);
119 } 119 }
120 120
121 void __init smp_setup_processor_id(void) 121 void __init smp_setup_processor_id(void)
122 { 122 {
123 u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; 123 u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
124 cpu_logical_map(0) = mpidr; 124 cpu_logical_map(0) = mpidr;
125 125
126 /* 126 /*
127 * clear __my_cpu_offset on boot CPU to avoid hang caused by 127 * clear __my_cpu_offset on boot CPU to avoid hang caused by
128 * using percpu variable early, for example, lockdep will 128 * using percpu variable early, for example, lockdep will
129 * access percpu variable inside lock_release 129 * access percpu variable inside lock_release
130 */ 130 */
131 set_my_cpu_offset(0); 131 set_my_cpu_offset(0);
132 pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr); 132 pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
133 } 133 }
134 134
135 bool arch_match_cpu_phys_id(int cpu, u64 phys_id) 135 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
136 { 136 {
137 return phys_id == cpu_logical_map(cpu); 137 return phys_id == cpu_logical_map(cpu);
138 } 138 }
139 139
140 struct mpidr_hash mpidr_hash; 140 struct mpidr_hash mpidr_hash;
141 #ifdef CONFIG_SMP 141 #ifdef CONFIG_SMP
142 /** 142 /**
143 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity 143 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
144 * level in order to build a linear index from an 144 * level in order to build a linear index from an
145 * MPIDR value. Resulting algorithm is a collision 145 * MPIDR value. Resulting algorithm is a collision
146 * free hash carried out through shifting and ORing 146 * free hash carried out through shifting and ORing
147 */ 147 */
148 static void __init smp_build_mpidr_hash(void) 148 static void __init smp_build_mpidr_hash(void)
149 { 149 {
150 u32 i, affinity, fs[4], bits[4], ls; 150 u32 i, affinity, fs[4], bits[4], ls;
151 u64 mask = 0; 151 u64 mask = 0;
152 /* 152 /*
153 * Pre-scan the list of MPIDRs and filter out bits that do 153 * Pre-scan the list of MPIDRs and filter out bits that do
154 * not contribute to affinity levels, i.e. they never toggle. 154 * not contribute to affinity levels, i.e. they never toggle.
155 */ 155 */
156 for_each_possible_cpu(i) 156 for_each_possible_cpu(i)
157 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0)); 157 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
158 pr_debug("mask of set bits %#llx\n", mask); 158 pr_debug("mask of set bits %#llx\n", mask);
159 /* 159 /*
160 * Find and stash the last and first bit set at all affinity levels to 160 * Find and stash the last and first bit set at all affinity levels to
161 * check how many bits are required to represent them. 161 * check how many bits are required to represent them.
162 */ 162 */
163 for (i = 0; i < 4; i++) { 163 for (i = 0; i < 4; i++) {
164 affinity = MPIDR_AFFINITY_LEVEL(mask, i); 164 affinity = MPIDR_AFFINITY_LEVEL(mask, i);
165 /* 165 /*
166 * Find the MSB and LSB bit positions 166 * Find the MSB and LSB bit positions
167 * to determine how many bits are required 167 * to determine how many bits are required
168 * to express the affinity level. 168 * to express the affinity level.
169 */ 169 */
170 ls = fls(affinity); 170 ls = fls(affinity);
171 fs[i] = affinity ? ffs(affinity) - 1 : 0; 171 fs[i] = affinity ? ffs(affinity) - 1 : 0;
172 bits[i] = ls - fs[i]; 172 bits[i] = ls - fs[i];
173 } 173 }
174 /* 174 /*
175 * An index can be created from the MPIDR_EL1 by isolating the 175 * An index can be created from the MPIDR_EL1 by isolating the
176 * significant bits at each affinity level and by shifting 176 * significant bits at each affinity level and by shifting
177 * them in order to compress the 32-bit value space into a 177 * them in order to compress the 32-bit value space into a
178 * compact set of values. This is equivalent to hashing 178 * compact set of values. This is equivalent to hashing
179 * the MPIDR_EL1 through shifting and ORing. It is a collision free 179 * the MPIDR_EL1 through shifting and ORing. It is a collision free
180 * hash though not minimal since some levels might contain a number 180 * hash though not minimal since some levels might contain a number
181 * of CPUs that is not an exact power of 2 and their bit 181 * of CPUs that is not an exact power of 2 and their bit
182 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}. 182 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
183 */ 183 */
184 mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0]; 184 mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
185 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0]; 185 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
186 mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] - 186 mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
187 (bits[1] + bits[0]); 187 (bits[1] + bits[0]);
188 mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) + 188 mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
189 fs[3] - (bits[2] + bits[1] + bits[0]); 189 fs[3] - (bits[2] + bits[1] + bits[0]);
190 mpidr_hash.mask = mask; 190 mpidr_hash.mask = mask;
191 mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0]; 191 mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
192 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n", 192 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
193 mpidr_hash.shift_aff[0], 193 mpidr_hash.shift_aff[0],
194 mpidr_hash.shift_aff[1], 194 mpidr_hash.shift_aff[1],
195 mpidr_hash.shift_aff[2], 195 mpidr_hash.shift_aff[2],
196 mpidr_hash.shift_aff[3], 196 mpidr_hash.shift_aff[3],
197 mpidr_hash.mask, 197 mpidr_hash.mask,
198 mpidr_hash.bits); 198 mpidr_hash.bits);
199 /* 199 /*
200 * 4x is an arbitrary value used to warn on a hash table much bigger 200 * 4x is an arbitrary value used to warn on a hash table much bigger
201 * than expected on most systems. 201 * than expected on most systems.
202 */ 202 */
203 if (mpidr_hash_size() > 4 * num_possible_cpus()) 203 if (mpidr_hash_size() > 4 * num_possible_cpus())
204 pr_warn("Large number of MPIDR hash buckets detected\n"); 204 pr_warn("Large number of MPIDR hash buckets detected\n");
205 __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash)); 205 __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
206 } 206 }
207 #endif 207 #endif
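A worked example of the hash construction, with four hypothetical CPUs whose
MPIDRs are 0x000, 0x001, 0x100 and 0x101: the XOR pre-scan gives mask ==
0x101, one toggling bit each at aff0 and aff1, so bits[0] == bits[1] == 1
and bits[2] == bits[3] == 0:

	/*
	 * shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + 0     = 0
	 * shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + 0 - 1 = 7
	 * shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + 0 - 2 = 14  (no live bits)
	 * shift_aff[3] = MPIDR_LEVEL_SHIFT(3) + 0 - 2 = 30  (no live bits)
	 *
	 * Each affinity field is masked, shifted right by its shift_aff[],
	 * and ORed into the index:
	 *   0x000 -> 0, 0x001 -> 1, 0x100 -> 2, 0x101 -> 3
	 * a collision-free 2-bit index (mpidr_hash.bits == 2).
	 */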
208 208
209 static void __init setup_processor(void) 209 static void __init setup_processor(void)
210 { 210 {
211 struct cpu_info *cpu_info; 211 struct cpu_info *cpu_info;
212 u64 features, block; 212 u64 features, block;
213 u32 cwg; 213 u32 cwg;
214 int cls; 214 int cls;
215 215
216 cpu_info = lookup_processor_type(read_cpuid_id()); 216 cpu_info = lookup_processor_type(read_cpuid_id());
217 if (!cpu_info) { 217 if (!cpu_info) {
218 printk("CPU configuration botched (ID %08x), unable to continue.\n", 218 printk("CPU configuration botched (ID %08x), unable to continue.\n",
219 read_cpuid_id()); 219 read_cpuid_id());
220 while (1); 220 while (1);
221 } 221 }
222 222
223 cpu_name = cpu_info->cpu_name; 223 cpu_name = cpu_info->cpu_name;
224 224
225 printk("CPU: %s [%08x] revision %d\n", 225 printk("CPU: %s [%08x] revision %d\n",
226 cpu_name, read_cpuid_id(), read_cpuid_id() & 15); 226 cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
227 227
228 sprintf(init_utsname()->machine, ELF_PLATFORM); 228 sprintf(init_utsname()->machine, ELF_PLATFORM);
229 elf_hwcap = 0; 229 elf_hwcap = 0;
230 230
231 cpuinfo_store_boot_cpu(); 231 cpuinfo_store_boot_cpu();
232 232
233 /* 233 /*
234 * Check for sane CTR_EL0.CWG value. 234 * Check for sane CTR_EL0.CWG value.
235 */ 235 */
236 cwg = cache_type_cwg(); 236 cwg = cache_type_cwg();
237 cls = cache_line_size(); 237 cls = cache_line_size();
238 if (!cwg) 238 if (!cwg)
239 pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n", 239 pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
240 cls); 240 cls);
241 if (L1_CACHE_BYTES < cls) 241 if (L1_CACHE_BYTES < cls)
242 pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n", 242 pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
243 L1_CACHE_BYTES, cls); 243 L1_CACHE_BYTES, cls);
244 244
245 /* 245 /*
246 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks. 246 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
247 * The blocks we test below represent incremental functionality 247 * The blocks we test below represent incremental functionality
248 * for non-negative values. Negative values are reserved. 248 * for non-negative values. Negative values are reserved.
249 */ 249 */
250 features = read_cpuid(ID_AA64ISAR0_EL1); 250 features = read_cpuid(ID_AA64ISAR0_EL1);
251 block = (features >> 4) & 0xf; 251 block = (features >> 4) & 0xf;
252 if (!(block & 0x8)) { 252 if (!(block & 0x8)) {
253 switch (block) { 253 switch (block) {
254 default: 254 default:
255 case 2: 255 case 2:
256 elf_hwcap |= HWCAP_PMULL; 256 elf_hwcap |= HWCAP_PMULL;
257 case 1: 257 case 1:
258 elf_hwcap |= HWCAP_AES; 258 elf_hwcap |= HWCAP_AES;
259 case 0: 259 case 0:
260 break; 260 break;
261 } 261 }
262 } 262 }
263 263
264 block = (features >> 8) & 0xf; 264 block = (features >> 8) & 0xf;
265 if (block && !(block & 0x8)) 265 if (block && !(block & 0x8))
266 elf_hwcap |= HWCAP_SHA1; 266 elf_hwcap |= HWCAP_SHA1;
267 267
268 block = (features >> 12) & 0xf; 268 block = (features >> 12) & 0xf;
269 if (block && !(block & 0x8)) 269 if (block && !(block & 0x8))
270 elf_hwcap |= HWCAP_SHA2; 270 elf_hwcap |= HWCAP_SHA2;
271 271
272 block = (features >> 16) & 0xf; 272 block = (features >> 16) & 0xf;
273 if (block && !(block & 0x8)) 273 if (block && !(block & 0x8))
274 elf_hwcap |= HWCAP_CRC32; 274 elf_hwcap |= HWCAP_CRC32;
275 275
276 #ifdef CONFIG_COMPAT 276 #ifdef CONFIG_COMPAT
277 /* 277 /*
278 * ID_ISAR5_EL1 carries similar information as above, but pertaining to 278 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
279 * the AArch32 execution state. 279 * the AArch32 execution state.
280 */ 280 */
281 features = read_cpuid(ID_ISAR5_EL1); 281 features = read_cpuid(ID_ISAR5_EL1);
282 block = (features >> 4) & 0xf; 282 block = (features >> 4) & 0xf;
283 if (!(block & 0x8)) { 283 if (!(block & 0x8)) {
284 switch (block) { 284 switch (block) {
285 default: 285 default:
286 case 2: 286 case 2:
287 compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL; 287 compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
288 case 1: 288 case 1:
289 compat_elf_hwcap2 |= COMPAT_HWCAP2_AES; 289 compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
290 case 0: 290 case 0:
291 break; 291 break;
292 } 292 }
293 } 293 }
294 294
295 block = (features >> 8) & 0xf; 295 block = (features >> 8) & 0xf;
296 if (block && !(block & 0x8)) 296 if (block && !(block & 0x8))
297 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1; 297 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
298 298
299 block = (features >> 12) & 0xf; 299 block = (features >> 12) & 0xf;
300 if (block && !(block & 0x8)) 300 if (block && !(block & 0x8))
301 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2; 301 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
302 302
303 block = (features >> 16) & 0xf; 303 block = (features >> 16) & 0xf;
304 if (block && !(block & 0x8)) 304 if (block && !(block & 0x8))
305 compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32; 305 compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
306 #endif 306 #endif
307 } 307 }
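The ID register fields above are 4-bit signed blocks: values 0x8..0xf read
as negative and are reserved, which is what !(block & 0x8) filters, and
within a block higher non-negative values are supersets of lower ones, hence
the deliberate switch fallthrough for the crypto field. A worked decode with
a hypothetical register value:

	/*
	 * features = 0x11120:
	 *   bits [7:4]   (AES)   = 0x2 -> HWCAP_PMULL | HWCAP_AES
	 *   bits [11:8]  (SHA1)  = 0x1 -> HWCAP_SHA1
	 *   bits [15:12] (SHA2)  = 0x1 -> HWCAP_SHA2
	 *   bits [19:16] (CRC32) = 0x1 -> HWCAP_CRC32
	 * A field of 0x8..0xf would be skipped as reserved.
	 */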
308 308
309 static void __init setup_machine_fdt(phys_addr_t dt_phys) 309 static void __init setup_machine_fdt(phys_addr_t dt_phys)
310 { 310 {
311 if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) { 311 if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
312 early_print("\n" 312 early_print("\n"
313 "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n" 313 "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
314 "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" 314 "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
315 "\nPlease check your bootloader.\n", 315 "\nPlease check your bootloader.\n",
316 dt_phys, phys_to_virt(dt_phys)); 316 dt_phys, phys_to_virt(dt_phys));
317 317
318 while (true) 318 while (true)
319 cpu_relax(); 319 cpu_relax();
320 } 320 }
321 321
322 dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name()); 322 dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
323 } 323 }
324 324
325 /* 325 /*
326 * Limit the memory size that was specified via FDT. 326 * Limit the memory size that was specified via FDT.
327 */ 327 */
328 static int __init early_mem(char *p) 328 static int __init early_mem(char *p)
329 { 329 {
330 phys_addr_t limit; 330 phys_addr_t limit;
331 331
332 if (!p) 332 if (!p)
333 return 1; 333 return 1;
334 334
335 limit = memparse(p, &p) & PAGE_MASK; 335 limit = memparse(p, &p) & PAGE_MASK;
336 pr_notice("Memory limited to %lldMB\n", limit >> 20); 336 pr_notice("Memory limited to %lldMB\n", limit >> 20);
337 337
338 memblock_enforce_memory_limit(limit); 338 memblock_enforce_memory_limit(limit);
339 339
340 return 0; 340 return 0;
341 } 341 }
342 early_param("mem", early_mem); 342 early_param("mem", early_mem);
343 343
344 static void __init request_standard_resources(void) 344 static void __init request_standard_resources(void)
345 { 345 {
346 struct memblock_region *region; 346 struct memblock_region *region;
347 struct resource *res; 347 struct resource *res;
348 348
349 kernel_code.start = virt_to_phys(_text); 349 kernel_code.start = virt_to_phys(_text);
350 kernel_code.end = virt_to_phys(_etext - 1); 350 kernel_code.end = virt_to_phys(_etext - 1);
351 kernel_data.start = virt_to_phys(_sdata); 351 kernel_data.start = virt_to_phys(_sdata);
352 kernel_data.end = virt_to_phys(_end - 1); 352 kernel_data.end = virt_to_phys(_end - 1);
353 353
354 for_each_memblock(memory, region) { 354 for_each_memblock(memory, region) {
355 res = alloc_bootmem_low(sizeof(*res)); 355 res = alloc_bootmem_low(sizeof(*res));
356 res->name = "System RAM"; 356 res->name = "System RAM";
357 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); 357 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
358 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; 358 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
359 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 359 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
360 360
361 request_resource(&iomem_resource, res); 361 request_resource(&iomem_resource, res);
362 362
363 if (kernel_code.start >= res->start && 363 if (kernel_code.start >= res->start &&
364 kernel_code.end <= res->end) 364 kernel_code.end <= res->end)
365 request_resource(res, &kernel_code); 365 request_resource(res, &kernel_code);
366 if (kernel_data.start >= res->start && 366 if (kernel_data.start >= res->start &&
367 kernel_data.end <= res->end) 367 kernel_data.end <= res->end)
368 request_resource(res, &kernel_data); 368 request_resource(res, &kernel_data);
369 } 369 }
370 } 370 }
371 371
372 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID }; 372 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
373 373
374 void __init setup_arch(char **cmdline_p) 374 void __init setup_arch(char **cmdline_p)
375 { 375 {
376 setup_processor(); 376 setup_processor();
377 377
378 setup_machine_fdt(__fdt_pointer); 378 setup_machine_fdt(__fdt_pointer);
379 379
380 init_mm.start_code = (unsigned long) _text; 380 init_mm.start_code = (unsigned long) _text;
381 init_mm.end_code = (unsigned long) _etext; 381 init_mm.end_code = (unsigned long) _etext;
382 init_mm.end_data = (unsigned long) _edata; 382 init_mm.end_data = (unsigned long) _edata;
383 init_mm.brk = (unsigned long) _end; 383 init_mm.brk = (unsigned long) _end;
384 384
385 *cmdline_p = boot_command_line; 385 *cmdline_p = boot_command_line;
386 386
387 early_fixmap_init(); 387 early_fixmap_init();
388 early_ioremap_init(); 388 early_ioremap_init();
389 389
390 parse_early_param(); 390 parse_early_param();
391 391
392 /* 392 /*
393 * Unmask asynchronous aborts after bringing up possible earlycon. 393 * Unmask asynchronous aborts after bringing up possible earlycon.
394 * (Report any pending System Errors once we are able to log them) 394 * (Report any pending System Errors once we are able to log them)
395 */ 395 */
396 local_async_enable(); 396 local_async_enable();
397 397
398 efi_init(); 398 efi_init();
399 arm64_memblock_init(); 399 arm64_memblock_init();
400 400
401 paging_init(); 401 paging_init();
402 request_standard_resources(); 402 request_standard_resources();
403 403
404 efi_idmap_init(); 404 efi_idmap_init();
405 early_ioremap_reset();
405 406
406 unflatten_device_tree(); 407 unflatten_device_tree();
407 408
408 psci_init(); 409 psci_init();
409 410
410 cpu_read_bootcpu_ops(); 411 cpu_read_bootcpu_ops();
411 #ifdef CONFIG_SMP 412 #ifdef CONFIG_SMP
412 smp_init_cpus(); 413 smp_init_cpus();
413 smp_build_mpidr_hash(); 414 smp_build_mpidr_hash();
414 #endif 415 #endif
415 416
416 #ifdef CONFIG_VT 417 #ifdef CONFIG_VT
417 #if defined(CONFIG_VGA_CONSOLE) 418 #if defined(CONFIG_VGA_CONSOLE)
418 conswitchp = &vga_con; 419 conswitchp = &vga_con;
419 #elif defined(CONFIG_DUMMY_CONSOLE) 420 #elif defined(CONFIG_DUMMY_CONSOLE)
420 conswitchp = &dummy_con; 421 conswitchp = &dummy_con;
421 #endif 422 #endif
422 #endif 423 #endif
423 } 424 }
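The early_ioremap_reset() added above (new line 405) closes the
early-ioremap window once the permanent mappings are in place, so any later,
stray early_ioremap()/early_memremap() use is caught rather than silently
recycling fixmap slots. It sits directly after efi_idmap_init(), which (per
the efi.c hunk above) now performs the last early_memunmap() of the UEFI
memory map.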
424 425
425 static int __init arm64_device_init(void) 426 static int __init arm64_device_init(void)
426 { 427 {
427 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 428 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
428 return 0; 429 return 0;
429 } 430 }
430 arch_initcall_sync(arm64_device_init); 431 arch_initcall_sync(arm64_device_init);
431 432
432 static int __init topology_init(void) 433 static int __init topology_init(void)
433 { 434 {
434 int i; 435 int i;
435 436
436 for_each_possible_cpu(i) { 437 for_each_possible_cpu(i) {
437 struct cpu *cpu = &per_cpu(cpu_data.cpu, i); 438 struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
438 cpu->hotpluggable = 1; 439 cpu->hotpluggable = 1;
439 register_cpu(cpu, i); 440 register_cpu(cpu, i);
440 } 441 }
441 442
442 return 0; 443 return 0;
443 } 444 }
444 subsys_initcall(topology_init); 445 subsys_initcall(topology_init);
445 446
446 static const char *hwcap_str[] = { 447 static const char *hwcap_str[] = {
447 "fp", 448 "fp",
448 "asimd", 449 "asimd",
449 "evtstrm", 450 "evtstrm",
450 "aes", 451 "aes",
451 "pmull", 452 "pmull",
452 "sha1", 453 "sha1",
453 "sha2", 454 "sha2",
454 "crc32", 455 "crc32",
455 NULL 456 NULL
456 }; 457 };
457 458
458 #ifdef CONFIG_COMPAT 459 #ifdef CONFIG_COMPAT
459 static const char *compat_hwcap_str[] = { 460 static const char *compat_hwcap_str[] = {
460 "swp", 461 "swp",
461 "half", 462 "half",
462 "thumb", 463 "thumb",
463 "26bit", 464 "26bit",
464 "fastmult", 465 "fastmult",
465 "fpa", 466 "fpa",
466 "vfp", 467 "vfp",
467 "edsp", 468 "edsp",
468 "java", 469 "java",
469 "iwmmxt", 470 "iwmmxt",
470 "crunch", 471 "crunch",
471 "thumbee", 472 "thumbee",
472 "neon", 473 "neon",
473 "vfpv3", 474 "vfpv3",
474 "vfpv3d16", 475 "vfpv3d16",
475 "tls", 476 "tls",
476 "vfpv4", 477 "vfpv4",
477 "idiva", 478 "idiva",
478 "idivt", 479 "idivt",
479 "vfpd32", 480 "vfpd32",
480 "lpae", 481 "lpae",
481 "evtstrm" 482 "evtstrm"
482 }; 483 };
483 484
484 static const char *compat_hwcap2_str[] = { 485 static const char *compat_hwcap2_str[] = {
485 "aes", 486 "aes",
486 "pmull", 487 "pmull",
487 "sha1", 488 "sha1",
488 "sha2", 489 "sha2",
489 "crc32", 490 "crc32",
490 NULL 491 NULL
491 }; 492 };
492 #endif /* CONFIG_COMPAT */ 493 #endif /* CONFIG_COMPAT */
493 494
494 static int c_show(struct seq_file *m, void *v) 495 static int c_show(struct seq_file *m, void *v)
495 { 496 {
496 int i, j; 497 int i, j;
497 498
498 for_each_online_cpu(i) { 499 for_each_online_cpu(i) {
499 struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); 500 struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
500 u32 midr = cpuinfo->reg_midr; 501 u32 midr = cpuinfo->reg_midr;
501 502
502 /* 503 /*
503 * glibc reads /proc/cpuinfo to determine the number of 504 * glibc reads /proc/cpuinfo to determine the number of
504 * online processors, looking for lines beginning with 505 * online processors, looking for lines beginning with
505 * "processor". Give glibc what it expects. 506 * "processor". Give glibc what it expects.
506 */ 507 */
507 #ifdef CONFIG_SMP 508 #ifdef CONFIG_SMP
508 seq_printf(m, "processor\t: %d\n", i); 509 seq_printf(m, "processor\t: %d\n", i);
509 #endif 510 #endif
510 511
511 /* 512 /*
512 * Dump out the common processor features in a single line. 513 * Dump out the common processor features in a single line.
513 * Userspace should read the hwcaps with getauxval(AT_HWCAP) 514 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
514 * rather than attempting to parse this, but there's a body of 515 * rather than attempting to parse this, but there's a body of
515 * software which does already (at least for 32-bit). 516 * software which does already (at least for 32-bit).
516 */ 517 */
517 seq_puts(m, "Features\t:"); 518 seq_puts(m, "Features\t:");
518 if (personality(current->personality) == PER_LINUX32) { 519 if (personality(current->personality) == PER_LINUX32) {
519 #ifdef CONFIG_COMPAT 520 #ifdef CONFIG_COMPAT
520 for (j = 0; compat_hwcap_str[j]; j++) 521 for (j = 0; compat_hwcap_str[j]; j++)
521 if (compat_elf_hwcap & (1 << j)) 522 if (compat_elf_hwcap & (1 << j))
522 seq_printf(m, " %s", compat_hwcap_str[j]); 523 seq_printf(m, " %s", compat_hwcap_str[j]);
523 524
524 for (j = 0; compat_hwcap2_str[j]; j++) 525 for (j = 0; compat_hwcap2_str[j]; j++)
525 if (compat_elf_hwcap2 & (1 << j)) 526 if (compat_elf_hwcap2 & (1 << j))
526 seq_printf(m, " %s", compat_hwcap2_str[j]); 527 seq_printf(m, " %s", compat_hwcap2_str[j]);
527 #endif /* CONFIG_COMPAT */ 528 #endif /* CONFIG_COMPAT */
528 } else { 529 } else {
529 for (j = 0; hwcap_str[j]; j++) 530 for (j = 0; hwcap_str[j]; j++)
530 if (elf_hwcap & (1 << j)) 531 if (elf_hwcap & (1 << j))
531 seq_printf(m, " %s", hwcap_str[j]); 532 seq_printf(m, " %s", hwcap_str[j]);
532 } 533 }
533 seq_puts(m, "\n"); 534 seq_puts(m, "\n");
534 535
535 seq_printf(m, "CPU implementer\t: 0x%02x\n", 536 seq_printf(m, "CPU implementer\t: 0x%02x\n",
536 MIDR_IMPLEMENTOR(midr)); 537 MIDR_IMPLEMENTOR(midr));
537 seq_printf(m, "CPU architecture: 8\n"); 538 seq_printf(m, "CPU architecture: 8\n");
538 seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr)); 539 seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
539 seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr)); 540 seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
540 seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr)); 541 seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
541 } 542 }
542 543
543 return 0; 544 return 0;
544 } 545 }
545 546
546 static void *c_start(struct seq_file *m, loff_t *pos) 547 static void *c_start(struct seq_file *m, loff_t *pos)
547 { 548 {
548 return *pos < 1 ? (void *)1 : NULL; 549 return *pos < 1 ? (void *)1 : NULL;
549 } 550 }
550 551
551 static void *c_next(struct seq_file *m, void *v, loff_t *pos) 552 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
552 { 553 {
553 ++*pos; 554 ++*pos;
554 return NULL; 555 return NULL;
555 } 556 }
556 557
557 static void c_stop(struct seq_file *m, void *v) 558 static void c_stop(struct seq_file *m, void *v)
558 { 559 {
559 } 560 }
560 561
561 const struct seq_operations cpuinfo_op = { 562 const struct seq_operations cpuinfo_op = {
562 .start = c_start, 563 .start = c_start,
563 .next = c_next, 564 .next = c_next,
564 .stop = c_stop, 565 .stop = c_stop,
565 .show = c_show 566 .show = c_show
566 }; 567 };
567 568
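
On the seq_file plumbing at the end of setup.c: c_start() hands back a single dummy token while *pos is 0 and c_next() immediately terminates the walk, which is the stock seq_file idiom for a one-record virtual file whose show() callback does its own looping (here, over the online CPUs). Note that the c_show() loops rely on the hwcap string arrays being NULL-terminated, hence the sentinel added to compat_hwcap_str above. The cpuinfo_op table itself is hooked up to /proc/cpuinfo by fs/proc/cpuinfo.c; as a rough sketch of how the same idiom is wired in a 3.19-era module (the demo_* names and the cpuinfo_demo /proc entry are invented):

	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static void *demo_start(struct seq_file *m, loff_t *pos)
	{
		return *pos < 1 ? (void *)1 : NULL;	/* one opaque token */
	}

	static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
	{
		++*pos;
		return NULL;				/* no second record */
	}

	static void demo_stop(struct seq_file *m, void *v)
	{
	}

	static int demo_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "hello from a one-record seq_file\n");
		return 0;
	}

	static const struct seq_operations demo_op = {
		.start	= demo_start,
		.next	= demo_next,
		.stop	= demo_stop,
		.show	= demo_show,
	};

	static int demo_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &demo_op);
	}

	static const struct file_operations demo_fops = {
		.owner		= THIS_MODULE,
		.open		= demo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release,
	};

	static int __init demo_init(void)
	{
		proc_create("cpuinfo_demo", 0444, NULL, &demo_fops);
		return 0;
	}
	module_init(demo_init);

	static void __exit demo_exit(void)
	{
		remove_proc_entry("cpuinfo_demo", NULL);
	}
	module_exit(demo_exit);

	MODULE_LICENSE("GPL");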
arch/arm64/kernel/smp_spin_table.c
1 /* 1 /*
2 * Spin Table SMP initialisation 2 * Spin Table SMP initialisation
3 * 3 *
4 * Copyright (C) 2013 ARM Ltd. 4 * Copyright (C) 2013 ARM Ltd.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * This program is distributed in the hope that it will be useful, 10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19 #include <linux/delay.h> 19 #include <linux/delay.h>
20 #include <linux/init.h> 20 #include <linux/init.h>
21 #include <linux/of.h> 21 #include <linux/of.h>
22 #include <linux/smp.h> 22 #include <linux/smp.h>
23 #include <linux/types.h> 23 #include <linux/types.h>
24 24
25 #include <asm/cacheflush.h> 25 #include <asm/cacheflush.h>
26 #include <asm/cpu_ops.h> 26 #include <asm/cpu_ops.h>
27 #include <asm/cputype.h> 27 #include <asm/cputype.h>
28 #include <asm/io.h>
28 #include <asm/smp_plat.h> 29 #include <asm/smp_plat.h>
29 30
30 extern void secondary_holding_pen(void); 31 extern void secondary_holding_pen(void);
31 volatile unsigned long secondary_holding_pen_release = INVALID_HWID; 32 volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
32 33
33 static phys_addr_t cpu_release_addr[NR_CPUS]; 34 static phys_addr_t cpu_release_addr[NR_CPUS];
34 35
35 /* 36 /*
36 * Write secondary_holding_pen_release in a way that is guaranteed to be 37 * Write secondary_holding_pen_release in a way that is guaranteed to be
37 * visible to all observers, irrespective of whether they're taking part 38 * visible to all observers, irrespective of whether they're taking part
38 * in coherency or not. This is necessary for the hotplug code to work 39 * in coherency or not. This is necessary for the hotplug code to work
39 * reliably. 40 * reliably.
40 */ 41 */
41 static void write_pen_release(u64 val) 42 static void write_pen_release(u64 val)
42 { 43 {
43 void *start = (void *)&secondary_holding_pen_release; 44 void *start = (void *)&secondary_holding_pen_release;
44 unsigned long size = sizeof(secondary_holding_pen_release); 45 unsigned long size = sizeof(secondary_holding_pen_release);
45 46
46 secondary_holding_pen_release = val; 47 secondary_holding_pen_release = val;
47 __flush_dcache_area(start, size); 48 __flush_dcache_area(start, size);
48 } 49 }
49 50
50 51
51 static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu) 52 static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
52 { 53 {
53 /* 54 /*
54 * Determine the address from which the CPU is polling. 55 * Determine the address from which the CPU is polling.
55 */ 56 */
56 if (of_property_read_u64(dn, "cpu-release-addr", 57 if (of_property_read_u64(dn, "cpu-release-addr",
57 &cpu_release_addr[cpu])) { 58 &cpu_release_addr[cpu])) {
58 pr_err("CPU %d: missing or invalid cpu-release-addr property\n", 59 pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
59 cpu); 60 cpu);
60 61
61 return -ENODEV; 62 return -ENODEV;
62 } 63 }
63 64
64 return 0; 65 return 0;
65 } 66 }
66 67
67 static int smp_spin_table_cpu_prepare(unsigned int cpu) 68 static int smp_spin_table_cpu_prepare(unsigned int cpu)
68 { 69 {
69 __le64 __iomem *release_addr; 70 __le64 __iomem *release_addr;
70 71
71 if (!cpu_release_addr[cpu]) 72 if (!cpu_release_addr[cpu])
72 return -ENODEV; 73 return -ENODEV;
73 74
74 /* 75 /*
75 * The cpu-release-addr may or may not be inside the linear mapping. 76 * The cpu-release-addr may or may not be inside the linear mapping.
76 * As ioremap_cache will either give us a new mapping or reuse the 77 * As ioremap_cache will either give us a new mapping or reuse the
77 * existing linear mapping, we can use it to cover both cases. In 78 * existing linear mapping, we can use it to cover both cases. In
78 * either case the memory will be MT_NORMAL. 79 * either case the memory will be MT_NORMAL.
79 */ 80 */
80 release_addr = ioremap_cache(cpu_release_addr[cpu], 81 release_addr = ioremap_cache(cpu_release_addr[cpu],
81 sizeof(*release_addr)); 82 sizeof(*release_addr));
82 if (!release_addr) 83 if (!release_addr)
83 return -ENOMEM; 84 return -ENOMEM;
84 85
85 /* 86 /*
86 * We write the release address as LE regardless of the native 87 * We write the release address as LE regardless of the native
87 * endianness of the kernel. Therefore, any boot-loader that 88 * endianness of the kernel. Therefore, any boot-loader that
88 * reads this address needs to convert it to its own 89 * reads this address needs to convert it to its own
89 * endianness before jumping. This is mandated by 90 * endianness before jumping. This is mandated by
90 * the boot protocol. 91 * the boot protocol.
91 */ 92 */
92 writeq_relaxed(__pa(secondary_holding_pen), release_addr); 93 writeq_relaxed(__pa(secondary_holding_pen), release_addr);
93 __flush_dcache_area((__force void *)release_addr, 94 __flush_dcache_area((__force void *)release_addr,
94 sizeof(*release_addr)); 95 sizeof(*release_addr));
95 96
96 /* 97 /*
97 * Send an event to wake up the secondary CPU. 98 * Send an event to wake up the secondary CPU.
98 */ 99 */
99 sev(); 100 sev();
100 101
101 iounmap(release_addr); 102 iounmap(release_addr);
102 103
103 return 0; 104 return 0;
104 } 105 }
105 106
106 static int smp_spin_table_cpu_boot(unsigned int cpu) 107 static int smp_spin_table_cpu_boot(unsigned int cpu)
107 { 108 {
108 /* 109 /*
109 * Update the pen release flag. 110 * Update the pen release flag.
110 */ 111 */
111 write_pen_release(cpu_logical_map(cpu)); 112 write_pen_release(cpu_logical_map(cpu));
112 113
113 /* 114 /*
114 * Send an event, causing the secondaries to read pen_release. 115 * Send an event, causing the secondaries to read pen_release.
115 */ 116 */
116 sev(); 117 sev();
117 118
118 return 0; 119 return 0;
119 } 120 }
120 121
121 const struct cpu_operations smp_spin_table_ops = { 122 const struct cpu_operations smp_spin_table_ops = {
122 .name = "spin-table", 123 .name = "spin-table",
123 .cpu_init = smp_spin_table_cpu_init, 124 .cpu_init = smp_spin_table_cpu_init,
124 .cpu_prepare = smp_spin_table_cpu_prepare, 125 .cpu_prepare = smp_spin_table_cpu_prepare,
125 .cpu_boot = smp_spin_table_cpu_boot, 126 .cpu_boot = smp_spin_table_cpu_boot,
126 }; 127 };
127 128
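
Taken together, the spin-table flow is: cpu_prepare() publishes __pa(secondary_holding_pen) as a little-endian 64-bit value at the firmware-advertised cpu-release-addr and cleans it to the point of coherency with __flush_dcache_area(); cpu_boot() then writes the target CPU's MPIDR into secondary_holding_pen_release via write_pen_release(); each sev() wakes CPUs sleeping in wfe. A hypothetical C rendering of the loop a parked secondary executes at cpu-release-addr (real firmware does this in assembly; spin_until_released() and the direct function-pointer jump are illustrative only):

	/* Illustrative pseudocode of the firmware-side spin loop. */
	static void __noreturn spin_until_released(volatile __le64 *release_addr)
	{
		u64 entry;

		for (;;) {
			/* cpu_prepare() stored the address little-endian. */
			entry = le64_to_cpu(*release_addr);
			if (entry)
				break;
			asm volatile("wfe");	/* sleep until the next sev() */
		}

		/*
		 * Jump to secondary_holding_pen, which spins the same way
		 * on secondary_holding_pen_release until cpu_boot() writes
		 * this CPU's MPIDR into it.
		 */
		((void (*)(void))entry)();
		unreachable();
	}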