Commit bcc8bcb1f0cc51c0042497d5de2d79743050e3bb

Authored by Heiko Carstens
Committed by Martin Schwidefsky
1 parent d1ed6a3ea1

[S390] revert add_active_range() usage patch.

Commit 7676bef9c183fd573822cac9992927ef596d584c breaks DCSS support on
s390. DCSS needs initialized struct pages to work, but with
add_active_range() in use only the struct pages for physically present
pages are initialized.
This could be fixed by having the DCSS driver initialize the struct
pages itself, but that doesn't work either: the mem_map array does not
include the holes after the last present memory area, so there is
nothing there that could be initialized.
To fix this and to avoid dirty hacks, revert the patch for now. It will
be added back when we move to a virtual mem_map.
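
The crux is how far the flat mem_map extends under each scheme. A rough
standalone sketch of the difference (not kernel code; the pfn values
and names are invented for illustration):

    #include <stdio.h>

    /* Present RAM ends here; a DCSS segment sits in the hole above it. */
    #define LAST_RAM_PFN 0x40000UL
    #define MAX_LOW_PFN  0x50000UL

    int main(void)
    {
            /*
             * With add_active_range()/free_area_init_nodes() the node
             * spans only the registered present ranges, so no struct
             * pages exist for the DCSS region above LAST_RAM_PFN.
             */
            unsigned long spanned_active = LAST_RAM_PFN;

            /*
             * With free_area_init_node() and explicit zone sizes the
             * zone spans everything up to max_low_pfn; holes are merely
             * subtracted via zholes_size, but their struct pages exist
             * and get initialized.
             */
            unsigned long spanned_flat = MAX_LOW_PFN;

            printf("active ranges: struct pages up to pfn %#lx\n",
                   spanned_active);
            printf("flat zones:    struct pages up to pfn %#lx\n",
                   spanned_flat);
            return 0;
    }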

Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Showing 4 changed files with 63 additions and 26 deletions

arch/s390/Kconfig

@@ -236,9 +236,6 @@
           This allows you to specify the maximum frame size a function may
           have without the compiler complaining about it.
 
-config ARCH_POPULATES_NODE_MAP
-        def_bool y
-
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"

arch/s390/defconfig

@@ -119,7 +119,6 @@
 CONFIG_CHECK_STACK=y
 CONFIG_STACK_GUARD=256
 # CONFIG_WARN_STACK is not set
-CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set

arch/s390/kernel/setup.c

@@ -70,6 +70,7 @@
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
+unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*

@@ -357,6 +358,21 @@
  */
 void (*pm_power_off)(void) = machine_power_off;
 
+static void __init
+add_memory_hole(unsigned long start, unsigned long end)
+{
+        unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+
+        if (end <= dma_pfn)
+                zholes_size[ZONE_DMA] += end - start + 1;
+        else if (start > dma_pfn)
+                zholes_size[ZONE_NORMAL] += end - start + 1;
+        else {
+                zholes_size[ZONE_DMA] += dma_pfn - start + 1;
+                zholes_size[ZONE_NORMAL] += end - dma_pfn;
+        }
+}
+
 static int __init early_parse_mem(char *p)
 {
         memory_end = memparse(p, &p);
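
The restored add_memory_hole() attributes each hole to ZONE_DMA, to
ZONE_NORMAL, or splits it across both, depending on where it falls
relative to the DMA boundary. A minimal standalone sketch of that
arithmetic (the dma_pfn value assumes s390's 2 GB MAX_DMA_ADDRESS with
4 KB pages; the sample hole bounds are made up):

    #include <stdio.h>

    enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

    static unsigned long zholes_size[MAX_NR_ZONES];
    static const unsigned long dma_pfn = 0x80000000UL >> 12;

    /* Same splitting logic as add_memory_hole() in the hunk above. */
    static void add_memory_hole(unsigned long start, unsigned long end)
    {
            if (end <= dma_pfn)
                    zholes_size[ZONE_DMA] += end - start + 1;
            else if (start > dma_pfn)
                    zholes_size[ZONE_NORMAL] += end - start + 1;
            else {
                    zholes_size[ZONE_DMA] += dma_pfn - start + 1;
                    zholes_size[ZONE_NORMAL] += end - dma_pfn;
            }
    }

    int main(void)
    {
            add_memory_hole(0x7ff00, 0x80100); /* straddles the boundary */
            printf("DMA hole pages:    %lu\n", zholes_size[ZONE_DMA]);
            printf("NORMAL hole pages: %lu\n", zholes_size[ZONE_NORMAL]);
            return 0;
    }

For the straddling hole this attributes 0x101 pages to ZONE_DMA and
0x100 pages to ZONE_NORMAL, i.e. the full end - start + 1 pages in
total.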

@@ -478,6 +494,7 @@
 {
         unsigned long bootmap_size;
         unsigned long start_pfn, end_pfn, init_pfn;
+        unsigned long last_rw_end;
         int i;
 
         /*

@@ -533,27 +550,39 @@
         /*
          * Register RAM areas with the bootmem allocator.
          */
+        last_rw_end = start_pfn;
 
         for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-                unsigned long start_chunk, end_chunk, pfn;
+                unsigned long start_chunk, end_chunk;
 
                 if (memory_chunk[i].type != CHUNK_READ_WRITE)
                         continue;
-                start_chunk = PFN_DOWN(memory_chunk[i].addr);
-                end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
-                end_chunk = min(end_chunk, end_pfn);
-                if (start_chunk >= end_chunk)
-                        continue;
-                add_active_range(0, start_chunk, end_chunk);
-                pfn = max(start_chunk, start_pfn);
-                for (; pfn <= end_chunk; pfn++)
-                        page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
+                start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
+                start_chunk >>= PAGE_SHIFT;
+                end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
+                end_chunk >>= PAGE_SHIFT;
+                if (start_chunk < start_pfn)
+                        start_chunk = start_pfn;
+                if (end_chunk > end_pfn)
+                        end_chunk = end_pfn;
+                if (start_chunk < end_chunk) {
+                        /* Initialize storage key for RAM pages */
+                        for (init_pfn = start_chunk ; init_pfn < end_chunk;
+                             init_pfn++)
+                                page_set_storage_key(init_pfn << PAGE_SHIFT,
+                                                     PAGE_DEFAULT_KEY);
+                        free_bootmem(start_chunk << PAGE_SHIFT,
+                                     (end_chunk - start_chunk) << PAGE_SHIFT);
+                        if (last_rw_end < start_chunk)
+                                add_memory_hole(last_rw_end, start_chunk - 1);
+                        last_rw_end = end_chunk;
+                }
         }
 
         psw_set_key(PAGE_DEFAULT_KEY);
 
-        free_bootmem_with_active_regions(0, max_pfn);
-        reserve_bootmem(0, PFN_PHYS(start_pfn));
+        if (last_rw_end < end_pfn - 1)
+                add_memory_hole(last_rw_end, end_pfn - 1);
 
         /*
          * Reserve the bootmem bitmap itself as well. We do this in two
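
Note the rounding in the restored loop: a chunk's start address is
rounded up to a page boundary and its end rounded down, so pages that
are only partially backed by a chunk are never handed to the bootmem
allocator. A quick illustrative calculation (the chunk address and size
are invented):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long addr = 0x100800; /* chunk starts mid-page */
            unsigned long size = 0x200400; /* and ends mid-page too */

            /* Round the start up and the end down, as the loop above does. */
            unsigned long start_pfn = (addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
            unsigned long end_pfn = (addr + size) >> PAGE_SHIFT;

            printf("pfns %#lx..%#lx are registered as RAM\n",
                   start_pfn, end_pfn - 1);
            return 0;
    }

Here start_pfn is 0x101 and end_pfn is 0x300, so the partial pages at
pfns 0x100 and 0x300 are skipped.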

arch/s390/mm/init.c

@@ -84,6 +84,7 @@
         printk("%d pages swap cached\n",cached);
 }
 
+extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
  */

@@ -100,15 +101,16 @@
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
         static const int ssm_mask = 0x04000000L;
         unsigned long ro_start_pfn, ro_end_pfn;
-        unsigned long max_zone_pfns[MAX_NR_ZONES];
+        unsigned long zones_size[MAX_NR_ZONES];
 
         ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
         ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-        max_zone_pfns[ZONE_DMA] = max_low_pfn;
-        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-        free_area_init_nodes(max_zone_pfns);
+        memset(zones_size, 0, sizeof(zones_size));
+        zones_size[ZONE_DMA] = max_low_pfn;
+        free_area_init_node(0, &contig_page_data, zones_size,
+                            __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+                            zholes_size);
 
         /* unmap whole virtual address space */
 
@@ -168,16 +170,26 @@
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
                 _KERN_REGION_TABLE;
         static const int ssm_mask = 0x04000000L;
+        unsigned long zones_size[MAX_NR_ZONES];
+        unsigned long dma_pfn, high_pfn;
         unsigned long ro_start_pfn, ro_end_pfn;
-        unsigned long max_zone_pfns[MAX_NR_ZONES];
 
+        memset(zones_size, 0, sizeof(zones_size));
+        dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+        high_pfn = max_low_pfn;
         ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
         ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-        free_area_init_nodes(max_zone_pfns);
+        if (dma_pfn > high_pfn)
+                zones_size[ZONE_DMA] = high_pfn;
+        else {
+                zones_size[ZONE_DMA] = dma_pfn;
+                zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
+        }
+
+        /* Initialize mem_map[]. */
+        free_area_init_node(0, &contig_page_data, zones_size,
+                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 
         /*
          * map whole physical memory to virtual memory (identity mapping)
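
The 64-bit paging_init() derives the zone sizes from the DMA boundary:
if all memory sits below it, everything lands in ZONE_DMA; otherwise
ZONE_NORMAL takes the remainder. A worked example, assuming the usual
s390 values (2 GB MAX_DMA_ADDRESS, 4 KB pages, 64-bit unsigned long)
and an invented 8 GB of RAM:

    #include <stdio.h>

    enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

    int main(void)
    {
            unsigned long zones_size[MAX_NR_ZONES] = { 0 };
            unsigned long dma_pfn = 0x80000000UL >> 12;   /* 2 GB boundary */
            unsigned long high_pfn = 0x200000000UL >> 12; /* assumed 8 GB */

            /* Same split as the 64-bit paging_init() above. */
            if (dma_pfn > high_pfn)
                    zones_size[ZONE_DMA] = high_pfn;
            else {
                    zones_size[ZONE_DMA] = dma_pfn;
                    zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
            }

            printf("ZONE_DMA:    %lu pages\n", zones_size[ZONE_DMA]);
            printf("ZONE_NORMAL: %lu pages\n", zones_size[ZONE_NORMAL]);
            return 0;
    }

With 8 GB of RAM this yields 524288 ZONE_DMA pages (the first 2 GB) and
1572864 ZONE_NORMAL pages. The 31-bit paging_init() above needs no such
split, since all 31-bit addressable memory lies below the 2 GB boundary.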