Commit 5e39977edf6500fd12f169e6c458d33b0ef62feb

Authored by Will Deacon
1 parent 7c68a9cc04

Revert "arm64: cpuinfo: print info for all CPUs"

It turns out that vendors are relying on the format of /proc/cpuinfo,
and we've even spotted out-of-tree hacks attempting to make it look
identical to the format used by arch/arm/. That means we can't afford to
churn this interface in mainline, so revert the recent reformatting of
the file for arm64 pending discussions on the list to find out what
people actually want.

This reverts commit d7a49086f263164a2c4c178eb76412d48cd671d7.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
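
The restored format matters because, as the message above notes, userspace (glibc included) parses it. With this revert applied, c_show() prints a single "Processor" summary line, one "processor : N" line per online CPU, and then one block of feature and MIDR-derived fields. On a hypothetical dual-core Cortex-A57 board the file would read roughly as follows (all values illustrative, not taken from real hardware):

    Processor	: AArch64 Processor rev 2 (aarch64)
    processor	: 0
    processor	: 1
    Features	: fp asimd evtstrm aes pmull sha1 sha2 crc32
    CPU implementer	: 0x41
    CPU architecture: AArch64
    CPU variant	: 0x0
    CPU part	: 0xd07
    CPU revision	: 2

    Hardware	: (machine name taken from the FDT)

The comment this revert deletes from c_show() still gives the right advice for feature detection: query the hwcaps via the auxiliary vector rather than parsing this file. A minimal userspace sketch, assuming glibc >= 2.16 for getauxval() and the arm64 uapi <asm/hwcap.h> for the HWCAP_* bits:

    #include <stdio.h>
    #include <sys/auxv.h>   /* getauxval(), AT_HWCAP */
    #include <asm/hwcap.h>  /* arm64 HWCAP_* feature bits */

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        /* Test the kernel-reported feature bits directly. */
        if (hwcap & HWCAP_AES)
            puts("AES instructions available");
        if (hwcap & HWCAP_CRC32)
            puts("CRC32 instructions available");

        return 0;
    }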

Showing 1 changed file with 22 additions and 18 deletions

arch/arm64/kernel/setup.c
 /*
  * Based on arch/arm/kernel/setup.c
  *
  * Copyright (C) 1995-2001 Russell King
  * Copyright (C) 2012 ARM Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/utsname.h>
 #include <linux/initrd.h>
 #include <linux/console.h>
 #include <linux/cache.h>
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
 #include <linux/screen_info.h>
 #include <linux/init.h>
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
 #include <linux/root_dev.h>
 #include <linux/clk-provider.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 #include <linux/fs.h>
 #include <linux/proc_fs.h>
 #include <linux/memblock.h>
 #include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 #include <linux/efi.h>
 
 #include <asm/fixmap.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
 #include <asm/cputable.h>
 #include <asm/cpu_ops.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/traps.h>
 #include <asm/memblock.h>
 #include <asm/psci.h>
 #include <asm/efi.h>
 
 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
 
 unsigned long elf_hwcap __read_mostly;
 EXPORT_SYMBOL_GPL(elf_hwcap);
 
 #ifdef CONFIG_COMPAT
 #define COMPAT_ELF_HWCAP_DEFAULT	\
 				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
 				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
 				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
 				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
 				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
 unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 static const char *cpu_name;
+static const char *machine_name;
 phys_addr_t __fdt_pointer __initdata;
 
 /*
  * Standard memory resources
  */
 static struct resource mem_res[] = {
 	{
 		.name = "Kernel code",
 		.start = 0,
 		.end = 0,
 		.flags = IORESOURCE_MEM
 	},
 	{
 		.name = "Kernel data",
 		.start = 0,
 		.end = 0,
 		.flags = IORESOURCE_MEM
 	}
 };
 
 #define kernel_code mem_res[0]
 #define kernel_data mem_res[1]
 
 void __init early_print(const char *str, ...)
 {
 	char buf[256];
 	va_list ap;
 
 	va_start(ap, str);
 	vsnprintf(buf, sizeof(buf), str, ap);
 	va_end(ap);
 
 	printk("%s", buf);
 }
 
 void __init smp_setup_processor_id(void)
 {
 	/*
 	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
 	 * using percpu variable early, for example, lockdep will
 	 * access percpu variable inside lock_release
 	 */
 	set_my_cpu_offset(0);
 }
 
 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
 	return phys_id == cpu_logical_map(cpu);
 }
 
 struct mpidr_hash mpidr_hash;
 #ifdef CONFIG_SMP
 /**
  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
  *			  level in order to build a linear index from an
  *			  MPIDR value. Resulting algorithm is a collision
  *			  free hash carried out through shifting and ORing
  */
 static void __init smp_build_mpidr_hash(void)
 {
 	u32 i, affinity, fs[4], bits[4], ls;
 	u64 mask = 0;
 	/*
 	 * Pre-scan the list of MPIDRS and filter out bits that do
 	 * not contribute to affinity levels, ie they never toggle.
 	 */
 	for_each_possible_cpu(i)
 		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
 	pr_debug("mask of set bits %#llx\n", mask);
 	/*
 	 * Find and stash the last and first bit set at all affinity levels to
 	 * check how many bits are required to represent them.
 	 */
 	for (i = 0; i < 4; i++) {
 		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
 		/*
 		 * Find the MSB bit and LSB bits position
 		 * to determine how many bits are required
 		 * to express the affinity level.
 		 */
 		ls = fls(affinity);
 		fs[i] = affinity ? ffs(affinity) - 1 : 0;
 		bits[i] = ls - fs[i];
 	}
 	/*
 	 * An index can be created from the MPIDR_EL1 by isolating the
 	 * significant bits at each affinity level and by shifting
 	 * them in order to compress the 32 bits values space to a
 	 * compressed set of values. This is equivalent to hashing
 	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
 	 * hash though not minimal since some levels might contain a number
 	 * of CPUs that is not an exact power of 2 and their bit
 	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
 	 */
 	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
 	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
 	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
 						(bits[1] + bits[0]);
 	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
 				  fs[3] - (bits[2] + bits[1] + bits[0]);
 	mpidr_hash.mask = mask;
 	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
 	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
 		mpidr_hash.shift_aff[0],
 		mpidr_hash.shift_aff[1],
 		mpidr_hash.shift_aff[2],
 		mpidr_hash.shift_aff[3],
 		mpidr_hash.mask,
 		mpidr_hash.bits);
 	/*
 	 * 4x is an arbitrary value used to warn on a hash table much bigger
 	 * than expected on most systems.
 	 */
 	if (mpidr_hash_size() > 4 * num_possible_cpus())
 		pr_warn("Large number of MPIDR hash buckets detected\n");
 	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
 }
 #endif
 
 static void __init setup_processor(void)
 {
 	struct cpu_info *cpu_info;
 	u64 features, block;
 	u32 cwg;
 	int cls;
 
 	cpu_info = lookup_processor_type(read_cpuid_id());
 	if (!cpu_info) {
 		printk("CPU configuration botched (ID %08x), unable to continue.\n",
 		       read_cpuid_id());
 		while (1);
 	}
 
 	cpu_name = cpu_info->cpu_name;
 
 	printk("CPU: %s [%08x] revision %d\n",
 	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
 
 	sprintf(init_utsname()->machine, ELF_PLATFORM);
 	elf_hwcap = 0;
 
 	cpuinfo_store_boot_cpu();
 
 	/*
 	 * Check for sane CTR_EL0.CWG value.
 	 */
 	cwg = cache_type_cwg();
 	cls = cache_line_size();
 	if (!cwg)
 		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
 			cls);
 	if (L1_CACHE_BYTES < cls)
 		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
 			L1_CACHE_BYTES, cls);
 
 	/*
 	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
 	 * The blocks we test below represent incremental functionality
 	 * for non-negative values. Negative values are reserved.
 	 */
 	features = read_cpuid(ID_AA64ISAR0_EL1);
 	block = (features >> 4) & 0xf;
 	if (!(block & 0x8)) {
 		switch (block) {
 		default:
 		case 2:
 			elf_hwcap |= HWCAP_PMULL;
 		case 1:
 			elf_hwcap |= HWCAP_AES;
 		case 0:
 			break;
 		}
 	}
 
 	block = (features >> 8) & 0xf;
 	if (block && !(block & 0x8))
 		elf_hwcap |= HWCAP_SHA1;
 
 	block = (features >> 12) & 0xf;
 	if (block && !(block & 0x8))
 		elf_hwcap |= HWCAP_SHA2;
 
 	block = (features >> 16) & 0xf;
 	if (block && !(block & 0x8))
 		elf_hwcap |= HWCAP_CRC32;
 
 #ifdef CONFIG_COMPAT
 	/*
 	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
 	 * the Aarch32 32-bit execution state.
 	 */
 	features = read_cpuid(ID_ISAR5_EL1);
 	block = (features >> 4) & 0xf;
 	if (!(block & 0x8)) {
 		switch (block) {
 		default:
 		case 2:
 			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
 		case 1:
 			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
 		case 0:
 			break;
 		}
 	}
 
 	block = (features >> 8) & 0xf;
 	if (block && !(block & 0x8))
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
 
 	block = (features >> 12) & 0xf;
 	if (block && !(block & 0x8))
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
 
 	block = (features >> 16) & 0xf;
 	if (block && !(block & 0x8))
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
 #endif
 }
 
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
 	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
 		early_print("\n"
 			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
 			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
 			"\nPlease check your bootloader.\n",
 			dt_phys, phys_to_virt(dt_phys));
 
 		while (true)
 			cpu_relax();
 	}
+
+	machine_name = of_flat_dt_get_machine_name();
 }
 
 /*
  * Limit the memory size that was specified via FDT.
  */
 static int __init early_mem(char *p)
 {
 	phys_addr_t limit;
 
 	if (!p)
 		return 1;
 
 	limit = memparse(p, &p) & PAGE_MASK;
 	pr_notice("Memory limited to %lldMB\n", limit >> 20);
 
 	memblock_enforce_memory_limit(limit);
 
 	return 0;
 }
 early_param("mem", early_mem);
 
 static void __init request_standard_resources(void)
 {
 	struct memblock_region *region;
 	struct resource *res;
 
 	kernel_code.start = virt_to_phys(_text);
 	kernel_code.end = virt_to_phys(_etext - 1);
 	kernel_data.start = virt_to_phys(_sdata);
 	kernel_data.end = virt_to_phys(_end - 1);
 
 	for_each_memblock(memory, region) {
 		res = alloc_bootmem_low(sizeof(*res));
 		res->name = "System RAM";
 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 
 		request_resource(&iomem_resource, res);
 
 		if (kernel_code.start >= res->start &&
 		    kernel_code.end <= res->end)
 			request_resource(res, &kernel_code);
 		if (kernel_data.start >= res->start &&
 		    kernel_data.end <= res->end)
 			request_resource(res, &kernel_data);
 	}
 }
 
 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 void __init setup_arch(char **cmdline_p)
 {
 	/*
 	 * Unmask asynchronous aborts early to catch possible system errors.
 	 */
 	local_async_enable();
 
 	setup_processor();
 
 	setup_machine_fdt(__fdt_pointer);
 
 	init_mm.start_code = (unsigned long) _text;
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = (unsigned long) _end;
 
 	*cmdline_p = boot_command_line;
 
 	early_ioremap_init();
 
 	parse_early_param();
 
 	efi_init();
 	arm64_memblock_init();
 
 	paging_init();
 	request_standard_resources();
 
 	efi_idmap_init();
 
 	unflatten_device_tree();
 
 	psci_init();
 
 	cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	cpu_read_bootcpu_ops();
 #ifdef CONFIG_SMP
 	smp_init_cpus();
 	smp_build_mpidr_hash();
 #endif
 
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
 #elif defined(CONFIG_DUMMY_CONSOLE)
 	conswitchp = &dummy_con;
 #endif
 #endif
 }
 
 static int __init arm64_device_init(void)
 {
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 	return 0;
 }
 arch_initcall_sync(arm64_device_init);
 
 static int __init topology_init(void)
 {
 	int i;
 
 	for_each_possible_cpu(i) {
 		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
 		cpu->hotpluggable = 1;
 		register_cpu(cpu, i);
 	}
 
 	return 0;
 }
 subsys_initcall(topology_init);
 
 static const char *hwcap_str[] = {
 	"fp",
 	"asimd",
 	"evtstrm",
 	"aes",
 	"pmull",
 	"sha1",
 	"sha2",
 	"crc32",
 	NULL
 };
 
 static int c_show(struct seq_file *m, void *v)
 {
 	int i;
 
-	/*
-	 * Dump out the common processor features in a single line. Userspace
-	 * should read the hwcaps with getauxval(AT_HWCAP) rather than
-	 * attempting to parse this.
-	 */
-	seq_puts(m, "features\t:");
-	for (i = 0; hwcap_str[i]; i++)
-		if (elf_hwcap & (1 << i))
-			seq_printf(m, " %s", hwcap_str[i]);
-	seq_puts(m, "\n\n");
+	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
+		   cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
 
 	for_each_online_cpu(i) {
-		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
-		u32 midr = cpuinfo->reg_midr;
-
 		/*
 		 * glibc reads /proc/cpuinfo to determine the number of
 		 * online processors, looking for lines beginning with
 		 * "processor". Give glibc what it expects.
 		 */
 #ifdef CONFIG_SMP
 		seq_printf(m, "processor\t: %d\n", i);
 #endif
-		seq_printf(m, "implementer\t: 0x%02x\n",
-			   MIDR_IMPLEMENTOR(midr));
-		seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr));
-		seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr));
-		seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr));
 	}
+
+	/* dump out the processor features */
+	seq_puts(m, "Features\t: ");
+
+	for (i = 0; hwcap_str[i]; i++)
+		if (elf_hwcap & (1 << i))
+			seq_printf(m, "%s ", hwcap_str[i]);
+
+	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
+	seq_printf(m, "CPU architecture: AArch64\n");
+	seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
+	seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
+	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
+
+	seq_puts(m, "\n");
+
+	seq_printf(m, "Hardware\t: %s\n", machine_name);
 
 	return 0;
 }
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
 	return *pos < 1 ? (void *)1 : NULL;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)