mm/sparse-vmemmap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
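
/*
 * Illustration (a sketch, not part of this file): with CONFIG_SPARSEMEM_VMEMMAP
 * the generic memory model reduces the primitives above to pointer arithmetic
 * against a fixed virtual base, roughly:
 *
 *	#define vmemmap			((struct page *)VMEMMAP_START)
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * where VMEMMAP_START is an arch-chosen constant, so no memory access is
 * needed for the conversion.
 */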
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h> |
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
|
static void * __ref __earlyonly_bootmem_alloc(int node, |
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid_raw(size, align, goal,
					       BOOTMEM_ALLOC_ACCESSIBLE, node);
} |
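
/*
 * Scratch buffer for early (boot-time) allocations: a per-node chunk is
 * preallocated by sparse_mem_maps_populate_node() below and carved up by
 * vmemmap_alloc_block_buf().
 */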
static void *vmemmap_buf;
static void *vmemmap_buf_end;
|
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* need to make sure all early-stage allocations use the same size */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node) |
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take the allocation from the buf */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}
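
/*
 * Worked example (hypothetical numbers): with a page-aligned vmemmap_buf and
 * repeated PAGE_SIZE requests, the ALIGN() above is a no-op and the buffer
 * simply advances one page per call. A larger request following smaller ones
 * would first round vmemmap_buf up to a multiple of its own size, and the
 * skipped bytes are never reclaimed; hence the same-size expectation noted
 * above.
 */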

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * altmap_alloc_block_buf - allocate pages from the device page map
 * @altmap: device page map
 * @size: size (in bytes) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
void * __meminit altmap_alloc_block_buf(unsigned long size, |
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
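	/*
	 * find_first_bit() yields the lowest set bit of nr_pfns, so the
	 * allocation is aligned to the largest power-of-two factor of its
	 * length: e.g. a 512-page request starts on a 512-page boundary.
	 * The pages skipped to reach that boundary are charged to
	 * altmap->align below.
	 */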
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
} |

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
} |

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{ |
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
} |

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) |
{ |
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
} |
|
pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node) |
{ |
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}
|
p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
} |

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{ |
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
|
	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
} |
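
/*
 * A minimal sketch of an architecture's vmemmap_populate() (illustrative
 * only; each arch provides its own) that backs the whole range with base
 * pages by deferring to the generic helper above:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */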
|
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{ |
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);
	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return map;
}
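
/*
 * Sizing note (assuming a common x86_64 configuration): with 128MB sections
 * and 4K pages, PAGES_PER_SECTION is 32768 and sizeof(struct page) is 64, so
 * each section needs 32768 * 64 = 2MB of memmap. That is exactly PMD_SIZE,
 * which is why the per-node buffer below is sized and aligned to PMD_SIZE.
 */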

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
			 PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* need to free the leftover buf */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}