Commit cd3db0c4ca3d237e7ad20f7107216e575705d2b0
1 parent
e63075a3c9
Exists in
master
and in
20 other branches
memblock: Remove rmo_size, bury it in arch/powerpc where it belongs
The RMA (RMO is a misnomer) is a concept specific to ppc64 (in fact server ppc64 though I hijack it on embedded ppc64 for similar purposes) and represents the area of memory that can be accessed in real mode (aka with MMU off), or on embedded, from the exception vectors (which is bolted in the TLB) which pretty much boils down to the same thing. We take that out of the generic MEMBLOCK data structure and move it into arch/powerpc where it belongs, renaming it to "RMA" while at it. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Showing 15 changed files with 125 additions and 40 deletions Side-by-side Diff
- arch/powerpc/include/asm/mmu.h
- arch/powerpc/kernel/head_40x.S
- arch/powerpc/kernel/paca.c
- arch/powerpc/kernel/prom.c
- arch/powerpc/kernel/rtas.c
- arch/powerpc/kernel/setup_64.c
- arch/powerpc/mm/40x_mmu.c
- arch/powerpc/mm/44x_mmu.c
- arch/powerpc/mm/fsl_booke_mmu.c
- arch/powerpc/mm/hash_utils_64.c
- arch/powerpc/mm/init_32.c
- arch/powerpc/mm/ppc_mmu_32.c
- arch/powerpc/mm/tlb_nohash.c
- include/linux/memblock.h
- mm/memblock.c
arch/powerpc/include/asm/mmu.h
... | ... | @@ -2,6 +2,8 @@ |
2 | 2 | #define _ASM_POWERPC_MMU_H_ |
3 | 3 | #ifdef __KERNEL__ |
4 | 4 | |
5 | +#include <linux/types.h> | |
6 | + | |
5 | 7 | #include <asm/asm-compat.h> |
6 | 8 | #include <asm/feature-fixups.h> |
7 | 9 | |
... | ... | @@ -81,6 +83,16 @@ |
81 | 83 | /* MMU initialization (64-bit only for now) |
82 | 84 | extern void early_init_mmu(void); |
83 | 85 | extern void early_init_mmu_secondary(void); |
86 | + | |
87 | +extern void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |
88 | + phys_addr_t first_memblock_size); | |
89 | + | |
90 | +#ifdef CONFIG_PPC64 | |
91 | +/* This is our real memory area size on ppc64 server, on embedded, we | |
92 | + * make it match the size of our bolted TLB area | |
93 | + */ | |
94 | +extern u64 ppc64_rma_size; | |
95 | +#endif /* CONFIG_PPC64 */ | |
84 | 96 | |
85 | 97 | #endif /* !__ASSEMBLY__ */ |
86 | 98 |
arch/powerpc/kernel/head_40x.S
... | ... | @@ -923,11 +923,7 @@ |
923 | 923 | mtspr SPRN_PID,r0 |
924 | 924 | sync |
925 | 925 | |
926 | - /* Configure and load two entries into TLB slots 62 and 63. | |
927 | - * In case we are pinning TLBs, these are reserved in by the | |
928 | - * other TLB functions. If not reserving, then it doesn't | |
929 | - * matter where they are loaded. | |
930 | - */ | |
926 | + /* Configure and load one entry into TLB slot 63 */ | |
931 | 927 | clrrwi r4,r4,10 /* Mask off the real page number */ |
932 | 928 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ |
933 | 929 |
arch/powerpc/kernel/paca.c
... | ... | @@ -117,7 +117,7 @@ |
117 | 117 | * the first segment. On iSeries they must be within the area mapped |
118 | 118 | * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. |
119 | 119 | */ |
120 | - limit = min(0x10000000ULL, memblock.rmo_size); | |
120 | + limit = min(0x10000000ULL, ppc64_rma_size); | |
121 | 121 | if (firmware_has_feature(FW_FEATURE_ISERIES)) |
122 | 122 | limit = min(limit, HvPagesToMap * HVPAGESIZE); |
123 | 123 |
arch/powerpc/kernel/prom.c
... | ... | @@ -66,6 +66,7 @@ |
66 | 66 | int __initdata iommu_is_off; |
67 | 67 | int __initdata iommu_force_on; |
68 | 68 | unsigned long tce_alloc_start, tce_alloc_end; |
69 | +u64 ppc64_rma_size; | |
69 | 70 | #endif |
70 | 71 | |
71 | 72 | static int __init early_parse_mem(char *p) |
... | ... | @@ -492,7 +493,7 @@ |
492 | 493 | |
493 | 494 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) |
494 | 495 | { |
495 | -#if defined(CONFIG_PPC64) | |
496 | +#ifdef CONFIG_PPC64 | |
496 | 497 | if (iommu_is_off) { |
497 | 498 | if (base >= 0x80000000ul) |
498 | 499 | return; |
499 | 500 | |
... | ... | @@ -501,9 +502,13 @@ |
501 | 502 | } |
502 | 503 | #endif |
503 | 504 | |
504 | - memblock_add(base, size); | |
505 | - | |
505 | + /* First MEMBLOCK added, do some special initializations */ | |
506 | + if (memstart_addr == ~(phys_addr_t)0) | |
507 | + setup_initial_memory_limit(base, size); | |
506 | 508 | memstart_addr = min((u64)memstart_addr, base); |
509 | + | |
510 | + /* Add the chunk to the MEMBLOCK list */ | |
511 | + memblock_add(base, size); | |
507 | 512 | } |
508 | 513 | |
509 | 514 | u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) |
... | ... | @@ -655,22 +660,6 @@ |
655 | 660 | static inline void __init phyp_dump_reserve_mem(void) {} |
656 | 661 | #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ |
657 | 662 | |
658 | -static void set_boot_memory_limit(void) | |
659 | -{ | |
660 | -#ifdef CONFIG_PPC32 | |
661 | - /* 601 can only access 16MB at the moment */ | |
662 | - if (PVR_VER(mfspr(SPRN_PVR)) == 1) | |
663 | - memblock_set_current_limit(0x01000000); | |
664 | - /* 8xx can only access 8MB at the moment */ | |
665 | - else if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) | |
666 | - memblock_set_current_limit(0x00800000); | |
667 | - else | |
668 | - memblock_set_current_limit(0x10000000); | |
669 | -#else | |
670 | - memblock_set_current_limit(memblock.rmo_size); | |
671 | -#endif | |
672 | -} | |
673 | - | |
674 | 663 | void __init early_init_devtree(void *params) |
675 | 664 | { |
676 | 665 | phys_addr_t limit; |
... | ... | @@ -733,8 +722,6 @@ |
733 | 722 | memblock_dump_all(); |
734 | 723 | |
735 | 724 | DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); |
736 | - | |
737 | - set_boot_memory_limit(); | |
738 | 725 | |
739 | 726 | /* We may need to relocate the flat tree, do it now. |
740 | 727 | * FIXME .. and the initrd too? */ |
arch/powerpc/kernel/rtas.c
... | ... | @@ -934,7 +934,7 @@ |
934 | 934 | */ |
935 | 935 | #ifdef CONFIG_PPC64 |
936 | 936 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { |
937 | - rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX); | |
937 | + rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX); | |
938 | 938 | ibm_suspend_me_token = rtas_token("ibm,suspend-me"); |
939 | 939 | } |
940 | 940 | #endif |
arch/powerpc/kernel/setup_64.c
... | ... | @@ -487,7 +487,7 @@ |
487 | 487 | * bringup, we need to get at them in real mode. This means they |
488 | 488 | * must also be within the RMO region. |
489 | 489 | */ |
490 | - limit = min(slb0_limit(), memblock.rmo_size); | |
490 | + limit = min(slb0_limit(), ppc64_rma_size); | |
491 | 491 | |
492 | 492 | for_each_possible_cpu(i) { |
493 | 493 | unsigned long sp; |
arch/powerpc/mm/40x_mmu.c
... | ... | @@ -141,8 +141,20 @@ |
141 | 141 | * coverage with normal-sized pages (or other reasons) do not |
142 | 142 | * attempt to allocate outside the allowed range. |
143 | 143 | */ |
144 | - memblock_set_current_limit(memstart_addr + mapped); | |
144 | + memblock_set_current_limit(mapped); | |
145 | 145 | |
146 | 146 | return mapped; |
147 | +} | |
148 | + | |
149 | +void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |
150 | + phys_addr_t first_memblock_size) | |
151 | +{ | |
152 | + /* We don't currently support the first MEMBLOCK not mapping 0 | |
153 | + * physical on those processors | |
154 | + */ | |
155 | + BUG_ON(first_memblock_base != 0); | |
156 | + | |
157 | + /* 40x can only access 16MB at the moment (see head_40x.S) */ | |
158 | + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000)); | |
147 | 159 | } |
arch/powerpc/mm/44x_mmu.c
... | ... | @@ -24,6 +24,8 @@ |
24 | 24 | */ |
25 | 25 | |
26 | 26 | #include <linux/init.h> |
27 | +#include <linux/memblock.h> | |
28 | + | |
27 | 29 | #include <asm/mmu.h> |
28 | 30 | #include <asm/system.h> |
29 | 31 | #include <asm/page.h> |
... | ... | @@ -211,6 +213,18 @@ |
211 | 213 | #endif /* DEBUG */ |
212 | 214 | } |
213 | 215 | return total_lowmem; |
216 | +} | |
217 | + | |
218 | +void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |
219 | + phys_addr_t first_memblock_size) | |
220 | +{ | |
221 | + /* We don't currently support the first MEMBLOCK not mapping 0 | |
222 | + * physical on those processors | |
223 | + */ | |
224 | + BUG_ON(first_memblock_base != 0); | |
225 | + | |
226 | + /* 44x has a 256M TLB entry pinned at boot */ | |
227 | + memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE)); | |
214 | 228 | } |
215 | 229 | |
216 | 230 | #ifdef CONFIG_SMP |
arch/powerpc/mm/fsl_booke_mmu.c
... | ... | @@ -215,4 +215,13 @@ |
215 | 215 | |
216 | 216 | memblock_set_current_limit(memstart_addr + __max_low_memory); |
217 | 217 | } |
218 | + | |
219 | +void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |
220 | + phys_addr_t first_memblock_size) | |
221 | +{ | |
222 | + phys_addr_t limit = first_memblock_base + first_memblock_size; | |
223 | + | |
224 | + /* 64M mapped initially according to head_fsl_booke.S */ | |
225 | + memblock_set_current_limit(min_t(u64, limit, 0x04000000)); | |
226 | +} |
arch/powerpc/mm/hash_utils_64.c
... | ... | @@ -649,7 +649,7 @@ |
649 | 649 | #ifdef CONFIG_DEBUG_PAGEALLOC |
650 | 650 | linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; |
651 | 651 | linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count, |
652 | - 1, memblock.rmo_size)); | |
652 | + 1, ppc64_rma_size)); | |
653 | 653 | memset(linear_map_hash_slots, 0, linear_map_hash_count); |
654 | 654 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
655 | 655 | |
... | ... | @@ -1248,4 +1248,24 @@ |
1248 | 1248 | local_irq_restore(flags); |
1249 | 1249 | } |
1250 | 1250 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
1251 | + | |
1252 | +void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |
1253 | + phys_addr_t first_memblock_size) | |
1254 | +{ | |
1255 | + /* We don't currently support the first MEMBLOCK not mapping 0 | |
1256 | + * physical on those processors | |
1257 | + */ | |
1258 | + BUG_ON(first_memblock_base != 0); | |
1259 | + | |
1260 | + /* On LPAR systems, the first entry is our RMA region, | |
1261 | + * non-LPAR 64-bit hash MMU systems don't have a limitation | |
1262 | + * on real mode access, but using the first entry works well | |
1263 | + * enough. We also clamp it to 1G to avoid some funky things | |
1264 | + * such as RTAS bugs etc... | |
1265 | + */ | |
1266 | + ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); | |
1267 | + | |
1268 | + /* Finally limit subsequent allocations */ | |
1269 | + memblock_set_current_limit(ppc64_rma_size); | |
1270 | +} |
arch/powerpc/mm/init_32.c
... | ... | @@ -236,4 +236,19 @@ |
236 | 236 | } |
237 | 237 | } |
238 | 238 | #endif |
239 | + | |
240 | + | |
241 | +#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */ | |
242 | +void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |
243 | + phys_addr_t first_memblock_size) | |
244 | +{ | |
245 | + /* We don't currently support the first MEMBLOCK not mapping 0 | |
246 | + * physical on those processors | |
247 | + */ | |
248 | + BUG_ON(first_memblock_base != 0); | |
249 | + | |
250 | + /* 8xx can only access 8MB at the moment */ | |
251 | + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); | |
252 | +} | |
253 | +#endif /* CONFIG_8xx */ |
arch/powerpc/mm/ppc_mmu_32.c
... | ... | @@ -271,4 +271,19 @@ |
271 | 271 | |
272 | 272 | if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205); |
273 | 273 | } |
274 | + | |
275 | +void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |
276 | + phys_addr_t first_memblock_size) | |
277 | +{ | |
278 | + /* We don't currently support the first MEMBLOCK not mapping 0 | |
279 | + * physical on those processors | |
280 | + */ | |
281 | + BUG_ON(first_memblock_base != 0); | |
282 | + | |
283 | + /* 601 can only access 16MB at the moment */ | |
284 | + if (PVR_VER(mfspr(SPRN_PVR)) == 1) | |
285 | + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000)); | |
286 | + else /* Anything else has 256M mapped */ | |
287 | + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000)); | |
288 | +} |
arch/powerpc/mm/tlb_nohash.c
... | ... | @@ -446,5 +446,19 @@ |
446 | 446 | __early_init_mmu(0); |
447 | 447 | } |
448 | 448 | |
449 | +void setup_initial_memory_limit(phys_addr_t first_memblock_base, | |
450 | + phys_addr_t first_memblock_size) | |
451 | +{ | |
452 | + /* On Embedded 64-bit, we adjust the RMA size to match | |
453 | + * the bolted TLB entry. We know for now that only 1G | |
454 | + * entries are supported though that may eventually | |
455 | + * change. We crop it to the size of the first MEMBLOCK to | |
456 | + * avoid going over total available memory just in case... | |
457 | + */ | |
458 | + ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); | |
459 | + | |
460 | + /* Finally limit subsequent allocations */ | |
461 | + memblock_set_current_limit(first_memblock_base + ppc64_rma_size); | |
462 | +} | |
449 | 463 | #endif /* CONFIG_PPC64 */ |
include/linux/memblock.h
mm/memblock.c
... | ... | @@ -49,7 +49,6 @@ |
49 | 49 | return; |
50 | 50 | |
51 | 51 | pr_info("MEMBLOCK configuration:\n"); |
52 | - pr_info(" rmo_size = 0x%llx\n", (unsigned long long)memblock.rmo_size); | |
53 | 52 | pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size); |
54 | 53 | |
55 | 54 | memblock_dump(&memblock.memory, "memory"); |
... | ... | @@ -195,10 +194,6 @@ |
195 | 194 | |
196 | 195 | long memblock_add(u64 base, u64 size) |
197 | 196 | { |
198 | - /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */ | |
199 | - if (base == 0) | |
200 | - memblock.rmo_size = size; | |
201 | - | |
202 | 197 | return memblock_add_region(&memblock.memory, base, size); |
203 | 198 | |
204 | 199 | } |
... | ... | @@ -458,9 +453,6 @@ |
458 | 453 | memblock.memory.cnt = i + 1; |
459 | 454 | break; |
460 | 455 | } |
461 | - | |
462 | - if (memblock.memory.regions[0].size < memblock.rmo_size) | |
463 | - memblock.rmo_size = memblock.memory.regions[0].size; | |
464 | 456 | |
465 | 457 | memory_limit = memblock_end_of_DRAM(); |
466 | 458 |