Blame view
arch/openrisc/mm/init.c
5.24 KB
2874c5fd2 treewide: Replace... |
1 |
// SPDX-License-Identifier: GPL-2.0-or-later |
61e85e367 OpenRISC: Memory ... |
2 3 4 5 6 7 8 9 10 11 |
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
61e85e367 OpenRISC: Memory ... |
12 13 14 15 16 17 18 19 20 21 22 23 24 |
*/ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/smp.h> |
57c8a661d mm: remove includ... |
25 |
#include <linux/memblock.h> |
61e85e367 OpenRISC: Memory ... |
26 27 28 29 |
#include <linux/init.h> #include <linux/delay.h> #include <linux/blkdev.h> /* for initrd_* */ #include <linux/pagemap.h> |
61e85e367 OpenRISC: Memory ... |
30 |
|
61e85e367 OpenRISC: Memory ... |
31 |
#include <asm/pgalloc.h> |
61e85e367 OpenRISC: Memory ... |
32 33 34 35 36 37 38 |
#include <asm/dma.h> #include <asm/io.h> #include <asm/tlb.h> #include <asm/mmu_context.h> #include <asm/kmap_types.h> #include <asm/fixmap.h> #include <asm/tlbflush.h> |
7932f61ba mm/openrisc: use ... |
39 |
#include <asm/sections.h> |
61e85e367 OpenRISC: Memory ... |
40 41 42 43 44 45 46 |
int mem_init_done; DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); static void __init zone_sizes_init(void) { |
fa3354e4e mm: free_area_ini... |
47 |
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; |
61e85e367 OpenRISC: Memory ... |
48 49 50 51 |
/* * We use only ZONE_NORMAL */ |
fa3354e4e mm: free_area_ini... |
52 |
max_zone_pfn[ZONE_NORMAL] = max_low_pfn; |
61e85e367 OpenRISC: Memory ... |
53 |
|
fa3354e4e mm: free_area_ini... |
54 |
free_area_init(max_zone_pfn); |
61e85e367 OpenRISC: Memory ... |
55 56 57 58 59 60 61 62 63 64 65 66 |
} extern const char _s_kernel_ro[], _e_kernel_ro[]; /* * Map all physical memory into kernel's address space. * * This is explicitly coded for two-level page tables, so if you need * something else then this needs to change. */ static void __init map_ram(void) { |
b10d6bca8 arch, drivers: re... |
67 |
phys_addr_t start, end; |
61e85e367 OpenRISC: Memory ... |
68 69 70 |
unsigned long v, p, e; pgprot_t prot; pgd_t *pge; |
b187fb7fc openrisc: add sup... |
71 |
p4d_t *p4e; |
61e85e367 OpenRISC: Memory ... |
72 73 74 |
pud_t *pue; pmd_t *pme; pte_t *pte; |
b10d6bca8 arch, drivers: re... |
75 |
u64 i; |
61e85e367 OpenRISC: Memory ... |
76 77 78 79 80 81 |
/* These mark extents of read-only kernel pages... * ...from vmlinux.lds.S */ struct memblock_region *region; v = PAGE_OFFSET; |
b10d6bca8 arch, drivers: re... |
82 83 84 |
for_each_mem_range(i, &start, &end) { p = (u32) start & PAGE_MASK; e = (u32) end; |
61e85e367 OpenRISC: Memory ... |
85 86 87 88 89 90 |
v = (u32) __va(p); pge = pgd_offset_k(v); while (p < e) { int j; |
b187fb7fc openrisc: add sup... |
91 92 |
p4e = p4d_offset(pge, v); pue = pud_offset(p4e, v); |
61e85e367 OpenRISC: Memory ... |
93 94 95 96 97 98 99 100 101 |
pme = pmd_offset(pue, v); if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) { panic("%s: OR1K kernel hardcoded for " "two-level page tables", __func__); } /* Alloc one page for holding PTE's... */ |
fb054d0d9 openrisc: prefer ... |
102 103 104 105 106 |
pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE); if (!pte) panic("%s: Failed to allocate page for PTEs ", __func__); |
61e85e367 OpenRISC: Memory ... |
107 108 109 |
set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte))); /* Fill the newly allocated page with PTE'S */ |
f47706099 openrisc: fix PTR... |
110 |
for (j = 0; p < e && j < PTRS_PER_PTE; |
61e85e367 OpenRISC: Memory ... |
111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 |
v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) { if (v >= (u32) _e_kernel_ro || v < (u32) _s_kernel_ro) prot = PAGE_KERNEL; else prot = PAGE_KERNEL_RO; set_pte(pte, mk_pte_phys(p, prot)); } pge++; } printk(KERN_INFO "%s: Memory: 0x%x-0x%x ", __func__, region->base, region->base + region->size); } } void __init paging_init(void) { extern void tlb_init(void); unsigned long end; int i; printk(KERN_INFO "Setting up paging and PTEs. "); /* clear out the init_mm.pgd that will contain the kernel's mappings */ for (i = 0; i < PTRS_PER_PGD; i++) swapper_pg_dir[i] = __pgd(0); /* make sure the current pgd table points to something sane * (even if it is most probably not used until the next * switch_mm) */ |
8e6d08e0a openrisc: initial... |
149 |
current_pgd[smp_processor_id()] = init_mm.pgd; |
61e85e367 OpenRISC: Memory ... |
150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 |
end = (unsigned long)__va(max_low_pfn * PAGE_SIZE); map_ram(); zone_sizes_init(); /* self modifying code ;) */ /* Since the old TLB miss handler has been running up until now, * the kernel pages are still all RW, so we can still modify the * text directly... after this change and a TLB flush, the kernel * pages will become RO. */ { extern unsigned long dtlb_miss_handler; extern unsigned long itlb_miss_handler; unsigned long *dtlb_vector = __va(0x900); unsigned long *itlb_vector = __va(0xa00); |
8668480eb openrisc: update ... |
169 170 171 172 173 174 175 176 177 |
printk(KERN_INFO "itlb_miss_handler %p ", &itlb_miss_handler); *itlb_vector = ((unsigned long)&itlb_miss_handler - (unsigned long)itlb_vector) >> 2; /* Soft ordering constraint to ensure that dtlb_vector is * the last thing updated */ barrier(); |
61e85e367 OpenRISC: Memory ... |
178 179 180 181 |
printk(KERN_INFO "dtlb_miss_handler %p ", &dtlb_miss_handler); *dtlb_vector = ((unsigned long)&dtlb_miss_handler - (unsigned long)dtlb_vector) >> 2; |
61e85e367 OpenRISC: Memory ... |
182 |
} |
8668480eb openrisc: update ... |
183 184 185 186 |
/* Soft ordering constraint to ensure that cache invalidation and * TLB flush really happen _after_ code has been modified. */ barrier(); |
61e85e367 OpenRISC: Memory ... |
187 188 189 190 191 192 193 194 195 196 197 198 199 |
/* Invalidate instruction caches after code modification */ mtspr(SPR_ICBIR, 0x900); mtspr(SPR_ICBIR, 0xa00); /* New TLB miss handlers and kernel page tables are in now place. * Make sure that page flags get updated for all pages in TLB by * flushing the TLB and forcing all TLB entries to be recreated * from their page table flags. */ flush_tlb_all(); } /* References to section boundaries */ |
61e85e367 OpenRISC: Memory ... |
200 201 |
void __init mem_init(void) { |
2e1c958de arch/openrisc/mm/... |
202 |
BUG_ON(!mem_map); |
61e85e367 OpenRISC: Memory ... |
203 |
|
1173db12b mm/openrisc: prep... |
204 |
max_mapnr = max_low_pfn; |
61e85e367 OpenRISC: Memory ... |
205 206 207 208 |
high_memory = (void *)__va(max_low_pfn * PAGE_SIZE); /* clear the zero-page */ memset((void *)empty_zero_page, 0, PAGE_SIZE); |
1173db12b mm/openrisc: prep... |
209 |
/* this will put all low memory onto the freelists */ |
c6ffc5ca8 memblock: rename ... |
210 |
memblock_free_all(); |
61e85e367 OpenRISC: Memory ... |
211 |
|
1173db12b mm/openrisc: prep... |
212 |
mem_init_print_info(NULL); |
61e85e367 OpenRISC: Memory ... |
213 214 215 216 217 218 |
printk("mem_init_done ........................................... "); mem_init_done = 1; return; } |