Commit e63075a3c9377536d085bc013cd3fe6323162449

Authored by Benjamin Herrenschmidt
1 parent 27f574c223

memblock: Introduce default allocation limit and use it to replace explicit ones

This introduces memblock.current_limit which is used to limit allocations
from memblock_alloc() or memblock_alloc_base(..., MEMBLOCK_ALLOC_ACCESSIBLE).

The old MEMBLOCK_ALLOC_ANYWHERE changes value from 0 to ~(u64)0 and can still
be used with memblock_alloc_base() to allocate really anywhere.

It is -no-longer- cropped to MEMBLOCK_REAL_LIMIT which disappears.

Note to archs: I'm leaving the default limit to MEMBLOCK_ALLOC_ANYWHERE. I
strongly recommend that you ensure that you set an appropriate limit
during boot in order to guarantee that a memblock_alloc() at any time
results in something that is accessible with a simple __va().

The reason is that a subsequent patch will introduce the ability for
the array to resize itself by reallocating itself. The MEMBLOCK core will
honor the current limit when performing those allocations.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Showing 14 changed files with 63 additions and 53 deletions Side-by-side Diff

arch/microblaze/include/asm/memblock.h
... ... @@ -9,8 +9,5 @@
9 9 #ifndef _ASM_MICROBLAZE_MEMBLOCK_H
10 10 #define _ASM_MICROBLAZE_MEMBLOCK_H
11 11  
12   -/* MEMBLOCK limit is OFF */
13   -#define MEMBLOCK_REAL_LIMIT 0xFFFFFFFF
14   -
15 12 #endif /* _ASM_MICROBLAZE_MEMBLOCK_H */
arch/powerpc/include/asm/memblock.h
... ... @@ -5,12 +5,5 @@
5 5  
6 6 #define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
7 7  
8   -#ifdef CONFIG_PPC32
9   -extern phys_addr_t lowmem_end_addr;
10   -#define MEMBLOCK_REAL_LIMIT lowmem_end_addr
11   -#else
12   -#define MEMBLOCK_REAL_LIMIT 0
13   -#endif
14   -
15 8 #endif /* _ASM_POWERPC_MEMBLOCK_H */
arch/powerpc/kernel/prom.c
... ... @@ -98,7 +98,7 @@
98 98  
99 99 if ((memory_limit && (start + size) > memory_limit) ||
100 100 overlaps_crashkernel(start, size)) {
101   - p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size));
  101 + p = __va(memblock_alloc(size, PAGE_SIZE));
102 102 memcpy(p, initial_boot_params, size);
103 103 initial_boot_params = (struct boot_param_header *)p;
104 104 DBG("Moved device tree to 0x%p\n", p);
... ... @@ -655,6 +655,21 @@
655 655 static inline void __init phyp_dump_reserve_mem(void) {}
656 656 #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
657 657  
  658 +static void set_boot_memory_limit(void)
  659 +{
  660 +#ifdef CONFIG_PPC32
  661 + /* 601 can only access 16MB at the moment */
  662 + if (PVR_VER(mfspr(SPRN_PVR)) == 1)
  663 + memblock_set_current_limit(0x01000000);
  664 + /* 8xx can only access 8MB at the moment */
  665 + else if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
  666 + memblock_set_current_limit(0x00800000);
  667 + else
  668 + memblock_set_current_limit(0x10000000);
  669 +#else
  670 + memblock_set_current_limit(memblock.rmo_size);
  671 +#endif
  672 +}
658 673  
659 674 void __init early_init_devtree(void *params)
660 675 {
... ... @@ -683,6 +698,7 @@
683 698  
684 699 /* Scan memory nodes and rebuild MEMBLOCKs */
685 700 memblock_init();
  701 +
686 702 of_scan_flat_dt(early_init_dt_scan_root, NULL);
687 703 of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
688 704  
... ... @@ -717,6 +733,8 @@
717 733 memblock_dump_all();
718 734  
719 735 DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
  736 +
  737 + set_boot_memory_limit();
720 738  
721 739 /* We may need to relocate the flat tree, do it now.
722 740 * FIXME .. and the initrd too? */
arch/powerpc/kernel/setup_32.c
... ... @@ -246,7 +246,7 @@
246 246 unsigned int i;
247 247  
248 248 /* interrupt stacks must be in lowmem, we get that for free on ppc32
249   - * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
  249 + * as the memblock is limited to lowmem by default */
250 250 for_each_possible_cpu(i) {
251 251 softirq_ctx[i] = (struct thread_info *)
252 252 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
arch/powerpc/mm/40x_mmu.c
... ... @@ -35,6 +35,7 @@
35 35 #include <linux/init.h>
36 36 #include <linux/delay.h>
37 37 #include <linux/highmem.h>
  38 +#include <linux/memblock.h>
38 39  
39 40 #include <asm/pgalloc.h>
40 41 #include <asm/prom.h>
... ... @@ -47,6 +48,7 @@
47 48 #include <asm/bootx.h>
48 49 #include <asm/machdep.h>
49 50 #include <asm/setup.h>
  51 +
50 52 #include "mmu_decl.h"
51 53  
52 54 extern int __map_without_ltlbs;
... ... @@ -139,8 +141,7 @@
139 141 * coverage with normal-sized pages (or other reasons) do not
140 142 * attempt to allocate outside the allowed range.
141 143 */
142   -
143   - __initial_memory_limit_addr = memstart_addr + mapped;
  144 + memblock_set_current_limit(memstart_addr + mapped);
144 145  
145 146 return mapped;
146 147 }
arch/powerpc/mm/fsl_booke_mmu.c
... ... @@ -40,6 +40,7 @@
40 40 #include <linux/init.h>
41 41 #include <linux/delay.h>
42 42 #include <linux/highmem.h>
  43 +#include <linux/memblock.h>
43 44  
44 45 #include <asm/pgalloc.h>
45 46 #include <asm/prom.h>
... ... @@ -212,6 +213,6 @@
212 213 pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
213 214 (unsigned int)((total_lowmem - __max_low_memory) >> 20));
214 215  
215   - __initial_memory_limit_addr = memstart_addr + __max_low_memory;
  216 + memblock_set_current_limit(memstart_addr + __max_low_memory);
216 217 }
arch/powerpc/mm/hash_utils_64.c
... ... @@ -696,7 +696,8 @@
696 696 #endif /* CONFIG_U3_DART */
697 697 BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
698 698 prot, mmu_linear_psize, mmu_kernel_ssize));
699   - }
  699 + }
  700 + memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
700 701  
701 702 /*
702 703 * If we have a memory_limit and we've allocated TCEs then we need to
arch/powerpc/mm/init_32.c
... ... @@ -92,12 +92,6 @@
92 92 unsigned long __max_low_memory = MAX_LOW_MEM;
93 93  
94 94 /*
95   - * address of the limit of what is accessible with initial MMU setup -
96   - * 256MB usually, but only 16MB on 601.
97   - */
98   -phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
99   -
100   -/*
101 95 * Check for command-line options that affect what MMU_init will do.
102 96 */
103 97 void MMU_setup(void)
... ... @@ -126,13 +120,6 @@
126 120 if (ppc_md.progress)
127 121 ppc_md.progress("MMU:enter", 0x111);
128 122  
129   - /* 601 can only access 16MB at the moment */
130   - if (PVR_VER(mfspr(SPRN_PVR)) == 1)
131   - __initial_memory_limit_addr = 0x01000000;
132   - /* 8xx can only access 8MB at the moment */
133   - if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
134   - __initial_memory_limit_addr = 0x00800000;
135   -
136 123 /* parse args from command line */
137 124 MMU_setup();
138 125  
139 126  
... ... @@ -190,20 +177,18 @@
190 177 #ifdef CONFIG_BOOTX_TEXT
191 178 btext_unmap();
192 179 #endif
  180 +
  181 + /* Shortly after that, the entire linear mapping will be available */
  182 + memblock_set_current_limit(lowmem_end_addr);
193 183 }
194 184  
195 185 /* This is only called until mem_init is done. */
196 186 void __init *early_get_page(void)
197 187 {
198   - void *p;
199   -
200   - if (init_bootmem_done) {
201   - p = alloc_bootmem_pages(PAGE_SIZE);
202   - } else {
203   - p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
204   - __initial_memory_limit_addr));
205   - }
206   - return p;
  188 + if (init_bootmem_done)
  189 + return alloc_bootmem_pages(PAGE_SIZE);
  190 + else
  191 + return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
207 192 }
208 193  
209 194 /* Free up now-unused memory */
arch/powerpc/mm/ppc_mmu_32.c
... ... @@ -223,8 +223,7 @@
223 223 * Find some memory for the hash table.
224 224 */
225 225 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
226   - Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
227   - __initial_memory_limit_addr));
  226 + Hash = __va(memblock_alloc(Hash_size, Hash_size));
228 227 cacheable_memzero(Hash, Hash_size);
229 228 _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
230 229  
arch/powerpc/mm/tlb_nohash.c
... ... @@ -432,6 +432,8 @@
432 432 * the MMU configuration
433 433 */
434 434 mb();
  435 +
  436 + memblock_set_current_limit(linear_map_top);
435 437 }
436 438  
437 439 void __init early_init_mmu(void)
arch/sh/include/asm/memblock.h
1 1 #ifndef __ASM_SH_MEMBLOCK_H
2 2 #define __ASM_SH_MEMBLOCK_H
3 3  
4   -#define MEMBLOCK_REAL_LIMIT 0
5   -
6 4 #endif /* __ASM_SH_MEMBLOCK_H */
arch/sparc/include/asm/memblock.h
... ... @@ -5,7 +5,5 @@
5 5  
6 6 #define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
7 7  
8   -#define MEMBLOCK_REAL_LIMIT 0
9   -
10 8 #endif /* !(_SPARC64_MEMBLOCK_H) */
include/linux/memblock.h
... ... @@ -34,6 +34,7 @@
34 34 struct memblock {
35 35 unsigned long debug;
36 36 u64 rmo_size;
  37 + u64 current_limit;
37 38 struct memblock_type memory;
38 39 struct memblock_type reserved;
39 40 };
40 41  
41 42  
... ... @@ -46,11 +47,16 @@
46 47 extern long memblock_remove(u64 base, u64 size);
47 48 extern long __init memblock_free(u64 base, u64 size);
48 49 extern long __init memblock_reserve(u64 base, u64 size);
  50 +
49 51 extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid);
50 52 extern u64 __init memblock_alloc(u64 size, u64 align);
  53 +
  54 +/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
  55 +#define MEMBLOCK_ALLOC_ANYWHERE (~(u64)0)
  56 +#define MEMBLOCK_ALLOC_ACCESSIBLE 0
  57 +
51 58 extern u64 __init memblock_alloc_base(u64 size,
52 59 u64, u64 max_addr);
53   -#define MEMBLOCK_ALLOC_ANYWHERE 0
54 60 extern u64 __init __memblock_alloc_base(u64 size,
55 61 u64 align, u64 max_addr);
56 62 extern u64 __init memblock_phys_mem_size(void);
... ... @@ -65,6 +71,14 @@
65 71  
66 72 /* Provided by the architecture */
67 73 extern u64 memblock_nid_range(u64 start, u64 end, int *nid);
  74 +
  75 +/**
  76 + * memblock_set_current_limit - Set the current allocation limit to allow
  77 + * limiting allocations to what is currently
  78 + * accessible during boot
  79 + * @limit: New limit value (physical address)
  80 + */
  81 +extern void memblock_set_current_limit(u64 limit);
68 82  
69 83  
70 84 /*
... ... @@ -115,6 +115,8 @@
115 115 memblock.reserved.regions[0].base = 0;
116 116 memblock.reserved.regions[0].size = 0;
117 117 memblock.reserved.cnt = 1;
  118 +
  119 + memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
118 120 }
119 121  
120 122 void __init memblock_analyze(void)
... ... @@ -373,7 +375,7 @@
373 375  
374 376 u64 __init memblock_alloc(u64 size, u64 align)
375 377 {
376   - return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
  378 + return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
377 379 }
378 380  
379 381 u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
380 382  
... ... @@ -399,14 +401,9 @@
399 401  
400 402 size = memblock_align_up(size, align);
401 403  
402   - /* On some platforms, make sure we allocate lowmem */
403   - /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */
404   - if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
405   - max_addr = MEMBLOCK_REAL_LIMIT;
406   -
407 404 /* Pump up max_addr */
408   - if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
409   - max_addr = ~(u64)0;
  405 + if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
  406 + max_addr = memblock.current_limit;
410 407  
411 408 /* We do a top-down search, this tends to limit memory
412 409 * fragmentation by keeping early boot allocs near the
... ... @@ -525,5 +522,11 @@
525 522 int memblock_is_region_reserved(u64 base, u64 size)
526 523 {
527 524 return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
  525 +}
  526 +
  527 +
  528 +void __init memblock_set_current_limit(u64 limit)
  529 +{
  530 + memblock.current_limit = limit;
528 531 }