Commit 7676bef9c183fd573822cac9992927ef596d584c

Authored by Heiko Carstens
Committed by Martin Schwidefsky
1 parent cb601d41c1

[S390] Have s390 use add_active_range() and free_area_init_nodes.

Size zones and holes in an architecture independent manner for s390.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>

Showing 4 changed files with 27 additions and 67 deletions
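At a high level, the commit replaces the per-architecture pattern of sizing each zone and each hole by hand with the generic zone-sizing interface. A minimal sketch of the before/after pattern, using only names that appear in the diff below:

        /* Before: the architecture computes zone sizes and holes itself. */
        zones_size[ZONE_DMA] = max_low_pfn;
        free_area_init_node(0, &contig_page_data, zones_size,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);

        /* After: the architecture only reports which PFN ranges hold RAM
         * and the highest PFN each zone may reach; the generic code derives
         * zone spans and holes from the registered active ranges.
         */
        add_active_range(0, start_chunk, end_chunk);    /* once per RAM chunk */
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);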

arch/s390/Kconfig
@@ -233,6 +233,9 @@
 	  This allows you to specify the maximum frame size a function may
 	  have without the compiler complaining about it.
 
+config ARCH_POPULATES_NODE_MAP
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
arch/s390/defconfig
@@ -118,6 +118,7 @@
 CONFIG_CHECK_STACK=y
 CONFIG_STACK_GUARD=256
 # CONFIG_WARN_STACK is not set
+CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
arch/s390/kernel/setup.c
@@ -70,7 +70,6 @@
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
-unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
@@ -358,21 +357,6 @@
  */
 void (*pm_power_off)(void) = machine_power_off;
 
-static void __init
-add_memory_hole(unsigned long start, unsigned long end)
-{
-	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-
-	if (end <= dma_pfn)
-		zholes_size[ZONE_DMA] += end - start + 1;
-	else if (start > dma_pfn)
-		zholes_size[ZONE_NORMAL] += end - start + 1;
-	else {
-		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
-		zholes_size[ZONE_NORMAL] += end - dma_pfn;
-	}
-}
-
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
@@ -494,7 +478,6 @@
 {
 	unsigned long bootmap_size;
 	unsigned long start_pfn, end_pfn, init_pfn;
-	unsigned long last_rw_end;
 	int i;
 
 	/*
@@ -543,46 +526,34 @@
 #endif
 
 	/*
-	 * Initialize the boot-time allocator (with low memory only):
+	 * Initialize the boot-time allocator
 	 */
 	bootmap_size = init_bootmem(start_pfn, end_pfn);
 
 	/*
 	 * Register RAM areas with the bootmem allocator.
 	 */
-	last_rw_end = start_pfn;
 
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long start_chunk, end_chunk;
+		unsigned long start_chunk, end_chunk, pfn;
 
 		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
-		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
-		start_chunk >>= PAGE_SHIFT;
-		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
-		end_chunk >>= PAGE_SHIFT;
-		if (start_chunk < start_pfn)
-			start_chunk = start_pfn;
-		if (end_chunk > end_pfn)
-			end_chunk = end_pfn;
-		if (start_chunk < end_chunk) {
-			/* Initialize storage key for RAM pages */
-			for (init_pfn = start_chunk ; init_pfn < end_chunk;
-			     init_pfn++)
-				page_set_storage_key(init_pfn << PAGE_SHIFT,
-						     PAGE_DEFAULT_KEY);
-			free_bootmem(start_chunk << PAGE_SHIFT,
-				     (end_chunk - start_chunk) << PAGE_SHIFT);
-			if (last_rw_end < start_chunk)
-				add_memory_hole(last_rw_end, start_chunk - 1);
-			last_rw_end = end_chunk;
-		}
+		start_chunk = PFN_DOWN(memory_chunk[i].addr);
+		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
+		end_chunk = min(end_chunk, end_pfn);
+		if (start_chunk >= end_chunk)
+			continue;
+		add_active_range(0, start_chunk, end_chunk);
+		pfn = max(start_chunk, start_pfn);
+		for (; pfn <= end_chunk; pfn++)
+			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
 	}
 
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	if (last_rw_end < end_pfn - 1)
-		add_memory_hole(last_rw_end, end_pfn - 1);
+	free_bootmem_with_active_regions(0, max_pfn);
+	reserve_bootmem(0, PFN_PHYS(start_pfn));
 
 	/*
 	 * Reserve the bootmem bitmap itself as well. We do this in two
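The rewritten chunk loop above trades the open-coded shifts for the generic PFN helpers from include/linux/pfn.h, which at the time were roughly:

        #define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)  /* round address up to a frame   */
        #define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)                    /* round address down to a frame */
        #define PFN_PHYS(x)     ((x) << PAGE_SHIFT)                    /* frame back to a physical addr */

One behavioral detail: the old code rounded a chunk's start address up before shifting, while the new code uses PFN_DOWN(), i.e. rounds it down; for page-aligned memory chunks the two are identical.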
arch/s390/mm/init.c
@@ -82,7 +82,6 @@
 	printk("%d pages swap cached\n",cached);
 }
 
-extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
  */
@@ -99,16 +98,15 @@
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	static const int ssm_mask = 0x04000000L;
 	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	memset(zones_size, 0, sizeof(zones_size));
-	zones_size[ZONE_DMA] = max_low_pfn;
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
-			    zholes_size);
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = max_low_pfn;
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 
 	/* unmap whole virtual address space */
@@ -153,7 +151,6 @@
 	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
-	return;
 }
 
 #else /* CONFIG_64BIT */
@@ -169,27 +166,17 @@
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
 		_KERN_REGION_TABLE;
 	static const int ssm_mask = 0x04000000L;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long dma_pfn, high_pfn;
 	unsigned long ro_start_pfn, ro_end_pfn;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-	memset(zones_size, 0, sizeof(zones_size));
-	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-	high_pfn = max_low_pfn;
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	if (dma_pfn > high_pfn)
-		zones_size[ZONE_DMA] = high_pfn;
-	else {
-		zones_size[ZONE_DMA] = dma_pfn;
-		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
-	}
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 
-	/* Initialize mem_map[]. */
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
-
 	/*
 	 * map whole physical memory to virtual memory (identity mapping)
 	 */
@@ -237,8 +224,6 @@
 	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
-
-	return;
 }
 #endif /* CONFIG_64BIT */
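The init.c hunks also change the meaning of the array handed to the core mm: zones_size[] held per-zone page counts, with holes reported separately through zholes_size[], whereas max_zone_pfns[] holds the highest PFN each zone may extend to, with holes inferred from the registered active ranges. A sketch with purely illustrative numbers (two RAM chunks with a hole between them, end PFNs passed inclusively as in the setup.c hunk above):

        add_active_range(0, 0x00000, 0x0ffff);          /* 256 MiB of RAM       */
        add_active_range(0, 0x14000, 0x1ffff);          /* 192 MiB after a hole */
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;       /* 0x20000 here */
        free_area_init_nodes(max_zone_pfns);
        /* The 64 MiB hole (PFNs 0x10000-0x13fff) is now accounted for
         * generically, where the old code had to call add_memory_hole(). */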