Commit c1cc1552616d0f354d040823151e61634e7ad01f
1 parent 4f04d8f005
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
arm64: MMU initialisation
This patch contains the initialisation of the memory blocks, MMU attributes and the memory map. Only five memory types are defined: Device nGnRnE (equivalent to Strongly Ordered), Device nGnRE (classic Device memory), Device GRE, Normal Non-cacheable and Normal Cacheable. Cache policies are supported via the memory attributes register (MAIR_EL1) and only affect the Normal Cacheable mappings.

This patch also adds the SPARSEMEM_VMEMMAP initialisation.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
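For reference, a rough standalone sketch (not part of this patch) of how the five memory types described above can be packed into MAIR_EL1, one 8-bit attribute field per attribute index. The attribute byte values are the architectural encodings, but the names and index assignments below are illustrative assumptions, not taken from this diff:

	#include <stdint.h>

	/* Attribute index per memory type (ordering as described in the commit message). */
	enum {
		MT_DEVICE_nGnRnE = 0,	/* Device, non-gathering, non-reordering, no early ack */
		MT_DEVICE_nGnRE  = 1,	/* Device, early write acknowledgement allowed */
		MT_DEVICE_GRE    = 2,	/* Device, gathering, reordering, early ack */
		MT_NORMAL_NC     = 3,	/* Normal memory, inner/outer non-cacheable */
		MT_NORMAL        = 4,	/* Normal memory, inner/outer write-back cacheable */
	};

	/* Each memory type owns one 8-bit field at (index * 8) within MAIR_EL1. */
	static inline uint64_t mair_attr(uint64_t attr, unsigned int mt)
	{
		return attr << (mt * 8);
	}

	static inline uint64_t mair_el1_value(void)
	{
		return mair_attr(0x00, MT_DEVICE_nGnRnE) |
		       mair_attr(0x04, MT_DEVICE_nGnRE)  |
		       mair_attr(0x0c, MT_DEVICE_GRE)    |
		       mair_attr(0x44, MT_NORMAL_NC)     |
		       mair_attr(0xff, MT_NORMAL);
	}

The (index * 8) placement is the same convention used by early_cachepolicy() in mmu.c below, which rewrites only the MT_NORMAL field to change the Normal-memory cache policy at boot.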
Showing 3 changed files with 853 additions and 0 deletions
arch/arm64/include/asm/memblock.h
1 | +/* | |
2 | + * Copyright (C) 2012 ARM Ltd. | |
3 | + * | |
4 | + * This program is free software; you can redistribute it and/or modify | |
5 | + * it under the terms of the GNU General Public License version 2 as | |
6 | + * published by the Free Software Foundation. | |
7 | + * | |
8 | + * This program is distributed in the hope that it will be useful, | |
9 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | + * GNU General Public License for more details. | |
12 | + * | |
13 | + * You should have received a copy of the GNU General Public License | |
14 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
15 | + */ | |
16 | +#ifndef __ASM_MEMBLOCK_H | |
17 | +#define __ASM_MEMBLOCK_H | |
18 | + | |
19 | +extern void arm64_memblock_init(void); | |
20 | + | |
21 | +#endif |
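For context, a runnable sketch of the boot ordering this header assumes: arm64_memblock_init() is expected to run from the arch setup path before paging_init(), which in turn calls bootmem_init(). Only those three names come from this series; the stubs and main() below are illustrative scaffolding, since the real callers live outside this patch:

	#include <stdio.h>

	/* Stubs standing in for the real kernel code added elsewhere in the series. */
	static void arm64_memblock_init(void) { puts("reserve kernel image, initrd, DTB, page tables"); }
	static void bootmem_init(void)        { puts("sparsemem and zone initialisation"); }
	static void paging_init(void)         { puts("create the kernel linear mapping"); bootmem_init(); }

	/* Assumed ordering in the arch setup path. */
	static void setup_arch_sketch(void)
	{
		/* the device tree and early parameters would already be parsed here */
		arm64_memblock_init();
		paging_init();
	}

	int main(void) { setup_arch_sketch(); return 0; }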
arch/arm64/mm/init.c
1 | +/* | |
2 | + * Based on arch/arm/mm/init.c | |
3 | + * | |
4 | + * Copyright (C) 1995-2005 Russell King | |
5 | + * Copyright (C) 2012 ARM Ltd. | |
6 | + * | |
7 | + * This program is free software; you can redistribute it and/or modify | |
8 | + * it under the terms of the GNU General Public License version 2 as | |
9 | + * published by the Free Software Foundation. | |
10 | + * | |
11 | + * This program is distributed in the hope that it will be useful, | |
12 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | + * GNU General Public License for more details. | |
15 | + * | |
16 | + * You should have received a copy of the GNU General Public License | |
17 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
18 | + */ | |
19 | + | |
20 | +#include <linux/kernel.h> | |
21 | +#include <linux/export.h> | |
22 | +#include <linux/errno.h> | |
23 | +#include <linux/swap.h> | |
24 | +#include <linux/init.h> | |
25 | +#include <linux/bootmem.h> | |
26 | +#include <linux/mman.h> | |
27 | +#include <linux/nodemask.h> | |
28 | +#include <linux/initrd.h> | |
29 | +#include <linux/gfp.h> | |
30 | +#include <linux/memblock.h> | |
31 | +#include <linux/sort.h> | |
32 | +#include <linux/of_fdt.h> | |
33 | + | |
34 | +#include <asm/prom.h> | |
35 | +#include <asm/sections.h> | |
36 | +#include <asm/setup.h> | |
37 | +#include <asm/sizes.h> | |
38 | +#include <asm/tlb.h> | |
39 | + | |
40 | +#include "mm.h" | |
41 | + | |
42 | +static unsigned long phys_initrd_start __initdata = 0; | |
43 | +static unsigned long phys_initrd_size __initdata = 0; | |
44 | + | |
45 | +phys_addr_t memstart_addr __read_mostly = 0; | |
46 | + | |
47 | +void __init early_init_dt_setup_initrd_arch(unsigned long start, | |
48 | + unsigned long end) | |
49 | +{ | |
50 | + phys_initrd_start = start; | |
51 | + phys_initrd_size = end - start; | |
52 | +} | |
53 | + | |
54 | +static int __init early_initrd(char *p) | |
55 | +{ | |
56 | + unsigned long start, size; | |
57 | + char *endp; | |
58 | + | |
59 | + start = memparse(p, &endp); | |
60 | + if (*endp == ',') { | |
61 | + size = memparse(endp + 1, NULL); | |
62 | + | |
63 | + phys_initrd_start = start; | |
64 | + phys_initrd_size = size; | |
65 | + } | |
66 | + return 0; | |
67 | +} | |
68 | +early_param("initrd", early_initrd); | |
69 | + | |
70 | +#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) | |
71 | + | |
72 | +static void __init zone_sizes_init(unsigned long min, unsigned long max) | |
73 | +{ | |
74 | + struct memblock_region *reg; | |
75 | + unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; | |
76 | + unsigned long max_dma32 = min; | |
77 | + | |
78 | + memset(zone_size, 0, sizeof(zone_size)); | |
79 | + | |
80 | +#ifdef CONFIG_ZONE_DMA32 | |
81 | + /* 4GB maximum for 32-bit only capable devices */ | |
82 | + max_dma32 = min(max, MAX_DMA32_PFN); | |
83 | + zone_size[ZONE_DMA32] = max_dma32 - min; | |
84 | +#endif | |
85 | + zone_size[ZONE_NORMAL] = max - max_dma32; | |
86 | + | |
87 | + memcpy(zhole_size, zone_size, sizeof(zhole_size)); | |
88 | + | |
89 | + for_each_memblock(memory, reg) { | |
90 | + unsigned long start = memblock_region_memory_base_pfn(reg); | |
91 | + unsigned long end = memblock_region_memory_end_pfn(reg); | |
92 | + | |
93 | + if (start >= max) | |
94 | + continue; | |
95 | +#ifdef CONFIG_ZONE_DMA32 | |
96 | + if (start < max_dma32) { | |
97 | + unsigned long dma_end = min(end, max_dma32); | |
98 | + zhole_size[ZONE_DMA32] -= dma_end - start; | |
99 | + } | |
100 | +#endif | |
101 | + if (end > max_dma32) { | |
102 | + unsigned long normal_end = min(end, max); | |
103 | + unsigned long normal_start = max(start, max_dma32); | |
104 | + zhole_size[ZONE_NORMAL] -= normal_end - normal_start; | |
105 | + } | |
106 | + } | |
107 | + | |
108 | + free_area_init_node(0, zone_size, min, zhole_size); | |
109 | +} | |
110 | + | |
111 | +#ifdef CONFIG_HAVE_ARCH_PFN_VALID | |
112 | +int pfn_valid(unsigned long pfn) | |
113 | +{ | |
114 | + return memblock_is_memory(pfn << PAGE_SHIFT); | |
115 | +} | |
116 | +EXPORT_SYMBOL(pfn_valid); | |
117 | +#endif | |
118 | + | |
119 | +#ifndef CONFIG_SPARSEMEM | |
120 | +static void arm64_memory_present(void) | |
121 | +{ | |
122 | +} | |
123 | +#else | |
124 | +static void arm64_memory_present(void) | |
125 | +{ | |
126 | + struct memblock_region *reg; | |
127 | + | |
128 | + for_each_memblock(memory, reg) | |
129 | + memory_present(0, memblock_region_memory_base_pfn(reg), | |
130 | + memblock_region_memory_end_pfn(reg)); | |
131 | +} | |
132 | +#endif | |
133 | + | |
134 | +void __init arm64_memblock_init(void) | |
135 | +{ | |
136 | + u64 *reserve_map, base, size; | |
137 | + | |
138 | + /* Register the kernel text, kernel data and initrd with memblock */ | |
139 | + memblock_reserve(__pa(_text), _end - _text); | |
140 | +#ifdef CONFIG_BLK_DEV_INITRD | |
141 | + if (phys_initrd_size) { | |
142 | + memblock_reserve(phys_initrd_start, phys_initrd_size); | |
143 | + | |
144 | + /* Now convert initrd to virtual addresses */ | |
145 | + initrd_start = __phys_to_virt(phys_initrd_start); | |
146 | + initrd_end = initrd_start + phys_initrd_size; | |
147 | + } | |
148 | +#endif | |
149 | + | |
150 | + /* | |
151 | + * Reserve the page tables. These are already in use, | |
152 | + * and can only be in node 0. | |
153 | + */ | |
154 | + memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE); | |
155 | + memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE); | |
156 | + | |
157 | + /* Reserve the dtb region */ | |
158 | + memblock_reserve(virt_to_phys(initial_boot_params), | |
159 | + be32_to_cpu(initial_boot_params->totalsize)); | |
160 | + | |
161 | + /* | |
162 | + * Process the reserve map. This will probably overlap the initrd | |
163 | + * and dtb locations which are already reserved, but overlapping | |
164 | + * doesn't hurt anything | |
165 | + */ | |
166 | + reserve_map = ((void*)initial_boot_params) + | |
167 | + be32_to_cpu(initial_boot_params->off_mem_rsvmap); | |
168 | + while (1) { | |
169 | + base = be64_to_cpup(reserve_map++); | |
170 | + size = be64_to_cpup(reserve_map++); | |
171 | + if (!size) | |
172 | + break; | |
173 | + memblock_reserve(base, size); | |
174 | + } | |
175 | + | |
176 | + memblock_allow_resize(); | |
177 | + memblock_dump_all(); | |
178 | +} | |
179 | + | |
180 | +void __init bootmem_init(void) | |
181 | +{ | |
182 | + unsigned long min, max; | |
183 | + | |
184 | + min = PFN_UP(memblock_start_of_DRAM()); | |
185 | + max = PFN_DOWN(memblock_end_of_DRAM()); | |
186 | + | |
187 | + /* | |
188 | + * Sparsemem tries to allocate bootmem in memory_present(), so must be | |
189 | + * done after the fixed reservations. | |
190 | + */ | |
191 | + arm64_memory_present(); | |
192 | + | |
193 | + sparse_init(); | |
194 | + zone_sizes_init(min, max); | |
195 | + | |
196 | + high_memory = __va((max << PAGE_SHIFT) - 1) + 1; | |
197 | + max_pfn = max_low_pfn = max; | |
198 | +} | |
199 | + | |
200 | +static inline int free_area(unsigned long pfn, unsigned long end, char *s) | |
201 | +{ | |
202 | + unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); | |
203 | + | |
204 | + for (; pfn < end; pfn++) { | |
205 | + struct page *page = pfn_to_page(pfn); | |
206 | + ClearPageReserved(page); | |
207 | + init_page_count(page); | |
208 | + __free_page(page); | |
209 | + pages++; | |
210 | + } | |
211 | + | |
212 | + if (size && s) | |
213 | + pr_info("Freeing %s memory: %dK\n", s, size); | |
214 | + | |
215 | + return pages; | |
216 | +} | |
217 | + | |
218 | +/* | |
219 | + * Poison init memory with an undefined instruction (0x0). | |
220 | + */ | |
221 | +static inline void poison_init_mem(void *s, size_t count) | |
222 | +{ | |
223 | + memset(s, 0, count); | |
224 | +} | |
225 | + | |
226 | +#ifndef CONFIG_SPARSEMEM_VMEMMAP | |
227 | +static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) | |
228 | +{ | |
229 | + struct page *start_pg, *end_pg; | |
230 | + unsigned long pg, pgend; | |
231 | + | |
232 | + /* | |
233 | + * Convert start_pfn/end_pfn to a struct page pointer. | |
234 | + */ | |
235 | + start_pg = pfn_to_page(start_pfn - 1) + 1; | |
236 | + end_pg = pfn_to_page(end_pfn - 1) + 1; | |
237 | + | |
238 | + /* | |
239 | + * Convert to physical addresses, and round start upwards and end | |
240 | + * downwards. | |
241 | + */ | |
242 | + pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); | |
243 | + pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; | |
244 | + | |
245 | + /* | |
246 | + * If there are free pages between these, free the section of the | |
247 | + * memmap array. | |
248 | + */ | |
249 | + if (pg < pgend) | |
250 | + free_bootmem(pg, pgend - pg); | |
251 | +} | |
252 | + | |
253 | +/* | |
254 | + * The mem_map array can get very big. Free the unused area of the memory map. | |
255 | + */ | |
256 | +static void __init free_unused_memmap(void) | |
257 | +{ | |
258 | + unsigned long start, prev_end = 0; | |
259 | + struct memblock_region *reg; | |
260 | + | |
261 | + for_each_memblock(memory, reg) { | |
262 | + start = __phys_to_pfn(reg->base); | |
263 | + | |
264 | +#ifdef CONFIG_SPARSEMEM | |
265 | + /* | |
266 | + * Take care not to free memmap entries that don't exist due | |
267 | + * to SPARSEMEM sections which aren't present. | |
268 | + */ | |
269 | + start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); | |
270 | +#endif | |
271 | + /* | |
272 | + * If we had a previous bank, and there is a space between the | |
273 | + * current bank and the previous, free it. | |
274 | + */ | |
275 | + if (prev_end && prev_end < start) | |
276 | + free_memmap(prev_end, start); | |
277 | + | |
278 | + /* | |
279 | + * Align up here since the VM subsystem insists that the | |
280 | + * memmap entries are valid from the bank end aligned to | |
281 | + * MAX_ORDER_NR_PAGES. | |
282 | + */ | |
283 | + prev_end = ALIGN(start + __phys_to_pfn(reg->size), | |
284 | + MAX_ORDER_NR_PAGES); | |
285 | + } | |
286 | + | |
287 | +#ifdef CONFIG_SPARSEMEM | |
288 | + if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) | |
289 | + free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); | |
290 | +#endif | |
291 | +} | |
292 | +#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ | |
293 | + | |
294 | +/* | |
295 | + * mem_init() marks the free areas in the mem_map and tells us how much memory | |
296 | + * is free. This is done after various parts of the system have claimed their | |
297 | + * memory after the kernel image. | |
298 | + */ | |
299 | +void __init mem_init(void) | |
300 | +{ | |
301 | + unsigned long reserved_pages, free_pages; | |
302 | + struct memblock_region *reg; | |
303 | + | |
304 | +#if CONFIG_SWIOTLB | |
305 | + extern void __init arm64_swiotlb_init(size_t max_size); | |
306 | + arm64_swiotlb_init(max_pfn << (PAGE_SHIFT - 1)); | |
307 | +#endif | |
308 | + | |
309 | + max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; | |
310 | + | |
311 | +#ifndef CONFIG_SPARSEMEM_VMEMMAP | |
312 | + /* this will put all unused low memory onto the freelists */ | |
313 | + free_unused_memmap(); | |
314 | +#endif | |
315 | + | |
316 | + totalram_pages += free_all_bootmem(); | |
317 | + | |
318 | + reserved_pages = free_pages = 0; | |
319 | + | |
320 | + for_each_memblock(memory, reg) { | |
321 | + unsigned int pfn1, pfn2; | |
322 | + struct page *page, *end; | |
323 | + | |
324 | + pfn1 = __phys_to_pfn(reg->base); | |
325 | + pfn2 = pfn1 + __phys_to_pfn(reg->size); | |
326 | + | |
327 | + page = pfn_to_page(pfn1); | |
328 | + end = pfn_to_page(pfn2 - 1) + 1; | |
329 | + | |
330 | + do { | |
331 | + if (PageReserved(page)) | |
332 | + reserved_pages++; | |
333 | + else if (!page_count(page)) | |
334 | + free_pages++; | |
335 | + page++; | |
336 | + } while (page < end); | |
337 | + } | |
338 | + | |
339 | + /* | |
340 | + * Since our memory may not be contiguous, calculate the real number | |
341 | + * of pages we have in this system. | |
342 | + */ | |
343 | + pr_info("Memory:"); | |
344 | + num_physpages = 0; | |
345 | + for_each_memblock(memory, reg) { | |
346 | + unsigned long pages = memblock_region_memory_end_pfn(reg) - | |
347 | + memblock_region_memory_base_pfn(reg); | |
348 | + num_physpages += pages; | |
349 | + printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); | |
350 | + } | |
351 | + printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | |
352 | + | |
353 | + pr_notice("Memory: %luk/%luk available, %luk reserved\n", | |
354 | + nr_free_pages() << (PAGE_SHIFT-10), | |
355 | + free_pages << (PAGE_SHIFT-10), | |
356 | + reserved_pages << (PAGE_SHIFT-10)); | |
357 | + | |
358 | +#define MLK(b, t) b, t, ((t) - (b)) >> 10 | |
359 | +#define MLM(b, t) b, t, ((t) - (b)) >> 20 | |
360 | +#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) | |
361 | + | |
362 | + pr_notice("Virtual kernel memory layout:\n" | |
363 | + " vmalloc : 0x%16lx - 0x%16lx (%6ld MB)\n" | |
364 | +#ifdef CONFIG_SPARSEMEM_VMEMMAP | |
365 | + " vmemmap : 0x%16lx - 0x%16lx (%6ld MB)\n" | |
366 | +#endif | |
367 | + " modules : 0x%16lx - 0x%16lx (%6ld MB)\n" | |
368 | + " memory : 0x%16lx - 0x%16lx (%6ld MB)\n" | |
369 | + " .init : 0x%p" " - 0x%p" " (%6ld kB)\n" | |
370 | + " .text : 0x%p" " - 0x%p" " (%6ld kB)\n" | |
371 | + " .data : 0x%p" " - 0x%p" " (%6ld kB)\n", | |
372 | + MLM(VMALLOC_START, VMALLOC_END), | |
373 | +#ifdef CONFIG_SPARSEMEM_VMEMMAP | |
374 | + MLM((unsigned long)virt_to_page(PAGE_OFFSET), | |
375 | + (unsigned long)virt_to_page(high_memory)), | |
376 | +#endif | |
377 | + MLM(MODULES_VADDR, MODULES_END), | |
378 | + MLM(PAGE_OFFSET, (unsigned long)high_memory), | |
379 | + | |
380 | + MLK_ROUNDUP(__init_begin, __init_end), | |
381 | + MLK_ROUNDUP(_text, _etext), | |
382 | + MLK_ROUNDUP(_sdata, _edata)); | |
383 | + | |
384 | +#undef MLK | |
385 | +#undef MLM | |
386 | +#undef MLK_ROUNDUP | |
387 | + | |
388 | + /* | |
389 | + * Check boundaries twice: Some fundamental inconsistencies can be | |
390 | + * detected at build time already. | |
391 | + */ | |
392 | +#ifdef CONFIG_COMPAT | |
393 | + BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); | |
394 | +#endif | |
395 | + BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR); | |
396 | + BUG_ON(TASK_SIZE_64 > MODULES_VADDR); | |
397 | + | |
398 | + if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | |
399 | + extern int sysctl_overcommit_memory; | |
400 | + /* | |
401 | + * On a machine this small we won't get anywhere without | |
402 | + * overcommit, so turn it on by default. | |
403 | + */ | |
404 | + sysctl_overcommit_memory = OVERCOMMIT_ALWAYS; | |
405 | + } | |
406 | +} | |
407 | + | |
408 | +void free_initmem(void) | |
409 | +{ | |
410 | + poison_init_mem(__init_begin, __init_end - __init_begin); | |
411 | + totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), | |
412 | + __phys_to_pfn(__pa(__init_end)), | |
413 | + "init"); | |
414 | +} | |
415 | + | |
416 | +#ifdef CONFIG_BLK_DEV_INITRD | |
417 | + | |
418 | +static int keep_initrd; | |
419 | + | |
420 | +void free_initrd_mem(unsigned long start, unsigned long end) | |
421 | +{ | |
422 | + if (!keep_initrd) { | |
423 | + poison_init_mem((void *)start, PAGE_ALIGN(end) - start); | |
424 | + totalram_pages += free_area(__phys_to_pfn(__pa(start)), | |
425 | + __phys_to_pfn(__pa(end)), | |
426 | + "initrd"); | |
427 | + } | |
428 | +} | |
429 | + | |
430 | +static int __init keepinitrd_setup(char *__unused) | |
431 | +{ | |
432 | + keep_initrd = 1; | |
433 | + return 1; | |
434 | +} | |
435 | + | |
436 | +__setup("keepinitrd", keepinitrd_setup); | |
437 | +#endif |
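As a worked example of the zone split performed by zone_sizes_init() in init.c above (the memory layout is an assumption, not taken from the patch): with 4 KB pages and a single memblock region starting at 0x80000000 and covering 8 GB, ZONE_DMA32 holds the 2 GB below the 4 GB boundary, ZONE_NORMAL holds the remaining 6 GB, and zhole_size ends up zero because the region is contiguous:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_shift = 12;				/* 4 KB pages */
		unsigned long min = 0x80000000UL >> page_shift;		/* first RAM pfn */
		unsigned long max = 0x280000000UL >> page_shift;	/* pfn just past RAM */
		unsigned long max_dma32 = (4UL << 30) >> page_shift;	/* MAX_DMA32_PFN */

		/* Same arithmetic as zone_sizes_init(): DMA32 below 4 GB, NORMAL above. */
		printf("ZONE_DMA32 : %lu pages (%lu MB)\n",
		       max_dma32 - min, (max_dma32 - min) >> (20 - page_shift));
		printf("ZONE_NORMAL: %lu pages (%lu MB)\n",
		       max - max_dma32, (max - max_dma32) >> (20 - page_shift));
		return 0;
	}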
arch/arm64/mm/mmu.c
1 | +/* | |
2 | + * Based on arch/arm/mm/mmu.c | |
3 | + * | |
4 | + * Copyright (C) 1995-2005 Russell King | |
5 | + * Copyright (C) 2012 ARM Ltd. | |
6 | + * | |
7 | + * This program is free software; you can redistribute it and/or modify | |
8 | + * it under the terms of the GNU General Public License version 2 as | |
9 | + * published by the Free Software Foundation. | |
10 | + * | |
11 | + * This program is distributed in the hope that it will be useful, | |
12 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | + * GNU General Public License for more details. | |
15 | + * | |
16 | + * You should have received a copy of the GNU General Public License | |
17 | + * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
18 | + */ | |
19 | + | |
20 | +#include <linux/export.h> | |
21 | +#include <linux/kernel.h> | |
22 | +#include <linux/errno.h> | |
23 | +#include <linux/init.h> | |
24 | +#include <linux/mman.h> | |
25 | +#include <linux/nodemask.h> | |
26 | +#include <linux/memblock.h> | |
27 | +#include <linux/fs.h> | |
28 | + | |
29 | +#include <asm/cputype.h> | |
30 | +#include <asm/sections.h> | |
31 | +#include <asm/setup.h> | |
32 | +#include <asm/sizes.h> | |
33 | +#include <asm/tlb.h> | |
34 | +#include <asm/mmu_context.h> | |
35 | + | |
36 | +#include "mm.h" | |
37 | + | |
38 | +/* | |
39 | + * Empty_zero_page is a special page that is used for zero-initialized data | |
40 | + * and COW. | |
41 | + */ | |
42 | +struct page *empty_zero_page; | |
43 | +EXPORT_SYMBOL(empty_zero_page); | |
44 | + | |
45 | +pgprot_t pgprot_default; | |
46 | +EXPORT_SYMBOL(pgprot_default); | |
47 | + | |
48 | +static pmdval_t prot_sect_kernel; | |
49 | + | |
50 | +struct cachepolicy { | |
51 | + const char policy[16]; | |
52 | + u64 mair; | |
53 | + u64 tcr; | |
54 | +}; | |
55 | + | |
56 | +static struct cachepolicy cache_policies[] __initdata = { | |
57 | + { | |
58 | + .policy = "uncached", | |
59 | + .mair = 0x44, /* inner, outer non-cacheable */ | |
60 | + .tcr = TCR_IRGN_NC | TCR_ORGN_NC, | |
61 | + }, { | |
62 | + .policy = "writethrough", | |
63 | + .mair = 0xaa, /* inner, outer write-through, read-allocate */ | |
64 | + .tcr = TCR_IRGN_WT | TCR_ORGN_WT, | |
65 | + }, { | |
66 | + .policy = "writeback", | |
67 | + .mair = 0xee, /* inner, outer write-back, read-allocate */ | |
68 | + .tcr = TCR_IRGN_WBnWA | TCR_ORGN_WBnWA, | |
69 | + } | |
70 | +}; | |
71 | + | |
72 | +/* | |
73 | + * These are useful for identifying cache coherency problems by allowing the | |
74 | + * cache or the cache and writebuffer to be turned off. It changes the Normal | |
75 | + * memory caching attributes in the MAIR_EL1 register. | |
76 | + */ | |
77 | +static int __init early_cachepolicy(char *p) | |
78 | +{ | |
79 | + int i; | |
80 | + u64 tmp; | |
81 | + | |
82 | + for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { | |
83 | + int len = strlen(cache_policies[i].policy); | |
84 | + | |
85 | + if (memcmp(p, cache_policies[i].policy, len) == 0) | |
86 | + break; | |
87 | + } | |
88 | + if (i == ARRAY_SIZE(cache_policies)) { | |
89 | + pr_err("ERROR: unknown or unsupported cache policy: %s\n", p); | |
90 | + return 0; | |
91 | + } | |
92 | + | |
93 | + flush_cache_all(); | |
94 | + | |
95 | + /* | |
96 | + * Modify MT_NORMAL attributes in MAIR_EL1. | |
97 | + */ | |
98 | + asm volatile( | |
99 | + " mrs %0, mair_el1\n" | |
100 | + " bfi %0, %1, #%2, #8\n" | |
101 | + " msr mair_el1, %0\n" | |
102 | + " isb\n" | |
103 | + : "=&r" (tmp) | |
104 | + : "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8)); | |
105 | + | |
106 | + /* | |
107 | + * Modify TCR PTW cacheability attributes. | |
108 | + */ | |
109 | + asm volatile( | |
110 | + " mrs %0, tcr_el1\n" | |
111 | + " bic %0, %0, %2\n" | |
112 | + " orr %0, %0, %1\n" | |
113 | + " msr tcr_el1, %0\n" | |
114 | + " isb\n" | |
115 | + : "=&r" (tmp) | |
116 | + : "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK)); | |
117 | + | |
118 | + flush_cache_all(); | |
119 | + | |
120 | + return 0; | |
121 | +} | |
122 | +early_param("cachepolicy", early_cachepolicy); | |
123 | + | |
124 | +/* | |
125 | + * Adjust the PMD section entries according to the CPU in use. | |
126 | + */ | |
127 | +static void __init init_mem_pgprot(void) | |
128 | +{ | |
129 | + pteval_t default_pgprot; | |
130 | + int i; | |
131 | + | |
132 | + default_pgprot = PTE_ATTRINDX(MT_NORMAL); | |
133 | + prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL); | |
134 | + | |
135 | +#ifdef CONFIG_SMP | |
136 | + /* | |
137 | + * Mark memory with the "shared" attribute for SMP systems | |
138 | + */ | |
139 | + default_pgprot |= PTE_SHARED; | |
140 | + prot_sect_kernel |= PMD_SECT_S; | |
141 | +#endif | |
142 | + | |
143 | + for (i = 0; i < 16; i++) { | |
144 | + unsigned long v = pgprot_val(protection_map[i]); | |
145 | + protection_map[i] = __pgprot(v | default_pgprot); | |
146 | + } | |
147 | + | |
148 | + pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot); | |
149 | +} | |
150 | + | |
151 | +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | |
152 | + unsigned long size, pgprot_t vma_prot) | |
153 | +{ | |
154 | + if (!pfn_valid(pfn)) | |
155 | + return pgprot_noncached(vma_prot); | |
156 | + else if (file->f_flags & O_SYNC) | |
157 | + return pgprot_writecombine(vma_prot); | |
158 | + return vma_prot; | |
159 | +} | |
160 | +EXPORT_SYMBOL(phys_mem_access_prot); | |
161 | + | |
162 | +static void __init *early_alloc(unsigned long sz) | |
163 | +{ | |
164 | + void *ptr = __va(memblock_alloc(sz, sz)); | |
165 | + memset(ptr, 0, sz); | |
166 | + return ptr; | |
167 | +} | |
168 | + | |
169 | +static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | |
170 | + unsigned long end, unsigned long pfn) | |
171 | +{ | |
172 | + pte_t *pte; | |
173 | + | |
174 | + if (pmd_none(*pmd)) { | |
175 | + pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t)); | |
176 | + __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE); | |
177 | + } | |
178 | + BUG_ON(pmd_bad(*pmd)); | |
179 | + | |
180 | + pte = pte_offset_kernel(pmd, addr); | |
181 | + do { | |
182 | + set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); | |
183 | + pfn++; | |
184 | + } while (pte++, addr += PAGE_SIZE, addr != end); | |
185 | +} | |
186 | + | |
187 | +static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, | |
188 | + unsigned long end, phys_addr_t phys) | |
189 | +{ | |
190 | + pmd_t *pmd; | |
191 | + unsigned long next; | |
192 | + | |
193 | + /* | |
194 | + * Check for initial section mappings in the pgd/pud and remove them. | |
195 | + */ | |
196 | + if (pud_none(*pud) || pud_bad(*pud)) { | |
197 | + pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t)); | |
198 | + pud_populate(&init_mm, pud, pmd); | |
199 | + } | |
200 | + | |
201 | + pmd = pmd_offset(pud, addr); | |
202 | + do { | |
203 | + next = pmd_addr_end(addr, end); | |
204 | + /* try section mapping first */ | |
205 | + if (((addr | next | phys) & ~SECTION_MASK) == 0) | |
206 | + set_pmd(pmd, __pmd(phys | prot_sect_kernel)); | |
207 | + else | |
208 | + alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys)); | |
209 | + phys += next - addr; | |
210 | + } while (pmd++, addr = next, addr != end); | |
211 | +} | |
212 | + | |
213 | +static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | |
214 | + unsigned long end, unsigned long phys) | |
215 | +{ | |
216 | + pud_t *pud = pud_offset(pgd, addr); | |
217 | + unsigned long next; | |
218 | + | |
219 | + do { | |
220 | + next = pud_addr_end(addr, end); | |
221 | + alloc_init_pmd(pud, addr, next, phys); | |
222 | + phys += next - addr; | |
223 | + } while (pud++, addr = next, addr != end); | |
224 | +} | |
225 | + | |
226 | +/* | |
227 | + * Create the page directory entries and any necessary page tables for the | |
228 | + * mapping specified by 'md'. | |
229 | + */ | |
230 | +static void __init create_mapping(phys_addr_t phys, unsigned long virt, | |
231 | + phys_addr_t size) | |
232 | +{ | |
233 | + unsigned long addr, length, end, next; | |
234 | + pgd_t *pgd; | |
235 | + | |
236 | + if (virt < VMALLOC_START) { | |
237 | + pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n", | |
238 | + phys, virt); | |
239 | + return; | |
240 | + } | |
241 | + | |
242 | + addr = virt & PAGE_MASK; | |
243 | + length = PAGE_ALIGN(size + (virt & ~PAGE_MASK)); | |
244 | + | |
245 | + pgd = pgd_offset_k(addr); | |
246 | + end = addr + length; | |
247 | + do { | |
248 | + next = pgd_addr_end(addr, end); | |
249 | + alloc_init_pud(pgd, addr, next, phys); | |
250 | + phys += next - addr; | |
251 | + } while (pgd++, addr = next, addr != end); | |
252 | +} | |
253 | + | |
254 | +static void __init map_mem(void) | |
255 | +{ | |
256 | + struct memblock_region *reg; | |
257 | + | |
258 | + /* map all the memory banks */ | |
259 | + for_each_memblock(memory, reg) { | |
260 | + phys_addr_t start = reg->base; | |
261 | + phys_addr_t end = start + reg->size; | |
262 | + | |
263 | + if (start >= end) | |
264 | + break; | |
265 | + | |
266 | + create_mapping(start, __phys_to_virt(start), end - start); | |
267 | + } | |
268 | +} | |
269 | + | |
270 | +/* | |
271 | + * paging_init() sets up the page tables, initialises the zone memory | |
272 | + * maps and sets up the zero page. | |
273 | + */ | |
274 | +void __init paging_init(void) | |
275 | +{ | |
276 | + void *zero_page; | |
277 | + | |
278 | + /* | |
279 | + * Maximum PGDIR_SIZE addressable via the initial direct kernel | |
280 | + * mapping in swapper_pg_dir. | |
281 | + */ | |
282 | + memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE); | |
283 | + | |
284 | + init_mem_pgprot(); | |
285 | + map_mem(); | |
286 | + | |
287 | + /* | |
288 | + * Finally flush the caches and tlb to ensure that we're in a | |
289 | + * consistent state. | |
290 | + */ | |
291 | + flush_cache_all(); | |
292 | + flush_tlb_all(); | |
293 | + | |
294 | + /* allocate the zero page. */ | |
295 | + zero_page = early_alloc(PAGE_SIZE); | |
296 | + | |
297 | + bootmem_init(); | |
298 | + | |
299 | + empty_zero_page = virt_to_page(zero_page); | |
300 | + __flush_dcache_page(empty_zero_page); | |
301 | + | |
302 | + /* | |
303 | + * TTBR0 is only used for the identity mapping at this stage. Make it | |
304 | + * point to zero page to avoid speculatively fetching new entries. | |
305 | + */ | |
306 | + cpu_set_reserved_ttbr0(); | |
307 | + flush_tlb_all(); | |
308 | +} | |
309 | + | |
310 | +/* | |
311 | + * Enable the identity mapping to allow the MMU disabling. | |
312 | + */ | |
313 | +void setup_mm_for_reboot(void) | |
314 | +{ | |
315 | + cpu_switch_mm(idmap_pg_dir, &init_mm); | |
316 | + flush_tlb_all(); | |
317 | +} | |
318 | + | |
319 | +/* | |
320 | + * Check whether a kernel address is valid (derived from arch/x86/). | |
321 | + */ | |
322 | +int kern_addr_valid(unsigned long addr) | |
323 | +{ | |
324 | + pgd_t *pgd; | |
325 | + pud_t *pud; | |
326 | + pmd_t *pmd; | |
327 | + pte_t *pte; | |
328 | + | |
329 | + if ((((long)addr) >> VA_BITS) != -1UL) | |
330 | + return 0; | |
331 | + | |
332 | + pgd = pgd_offset_k(addr); | |
333 | + if (pgd_none(*pgd)) | |
334 | + return 0; | |
335 | + | |
336 | + pud = pud_offset(pgd, addr); | |
337 | + if (pud_none(*pud)) | |
338 | + return 0; | |
339 | + | |
340 | + pmd = pmd_offset(pud, addr); | |
341 | + if (pmd_none(*pmd)) | |
342 | + return 0; | |
343 | + | |
344 | + pte = pte_offset_kernel(pmd, addr); | |
345 | + if (pte_none(*pte)) | |
346 | + return 0; | |
347 | + | |
348 | + return pfn_valid(pte_pfn(*pte)); | |
349 | +} | |
350 | +#ifdef CONFIG_SPARSEMEM_VMEMMAP | |
351 | +#ifdef CONFIG_ARM64_64K_PAGES | |
352 | +int __meminit vmemmap_populate(struct page *start_page, | |
353 | + unsigned long size, int node) | |
354 | +{ | |
355 | + return vmemmap_populate_basepages(start_page, size, node); | |
356 | +} | |
357 | +#else /* !CONFIG_ARM64_64K_PAGES */ | |
358 | +int __meminit vmemmap_populate(struct page *start_page, | |
359 | + unsigned long size, int node) | |
360 | +{ | |
361 | + unsigned long addr = (unsigned long)start_page; | |
362 | + unsigned long end = (unsigned long)(start_page + size); | |
363 | + unsigned long next; | |
364 | + pgd_t *pgd; | |
365 | + pud_t *pud; | |
366 | + pmd_t *pmd; | |
367 | + | |
368 | + do { | |
369 | + next = pmd_addr_end(addr, end); | |
370 | + | |
371 | + pgd = vmemmap_pgd_populate(addr, node); | |
372 | + if (!pgd) | |
373 | + return -ENOMEM; | |
374 | + | |
375 | + pud = vmemmap_pud_populate(pgd, addr, node); | |
376 | + if (!pud) | |
377 | + return -ENOMEM; | |
378 | + | |
379 | + pmd = pmd_offset(pud, addr); | |
380 | + if (pmd_none(*pmd)) { | |
381 | + void *p = NULL; | |
382 | + | |
383 | + p = vmemmap_alloc_block_buf(PMD_SIZE, node); | |
384 | + if (!p) | |
385 | + return -ENOMEM; | |
386 | + | |
387 | + set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel)); | |
388 | + } else | |
389 | + vmemmap_verify((pte_t *)pmd, node, addr, next); | |
390 | + } while (addr = next, addr != end); | |
391 | + | |
392 | + return 0; | |
393 | +} | |
394 | +#endif /* CONFIG_ARM64_64K_PAGES */ | |
395 | +#endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
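Usage note for early_cachepolicy() above: because it is registered with early_param("cachepolicy", ...), the Normal-memory cache policy can be overridden from the kernel command line at boot, for example by appending cachepolicy=writethrough. The accepted values, per the cache_policies[] table, are uncached, writethrough and writeback; anything else is rejected with an error message and the existing MAIR_EL1/TCR_EL1 settings are left unchanged.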