Commit 996b4a7d8f4e5dd531369396f2312b97e9400cdc

Authored by Heiko Carstens
Committed by Martin Schwidefsky
1 parent d3383632d4

s390/mem_detect: remove artificial kdump memory types

Simplify the memory detection code a bit by removing the CHUNK_OLDMEM
and CHUNK_CRASHK memory types.
They are not needed; all that is required is a mechanism to
insert holes into the detected memory.

Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Showing 5 changed files with 41 additions and 103 deletions (side-by-side diff)

arch/s390/include/asm/setup.h
... ... @@ -33,8 +33,6 @@
33 33  
34 34 #define CHUNK_READ_WRITE 0
35 35 #define CHUNK_READ_ONLY 1
36   -#define CHUNK_OLDMEM 4
37   -#define CHUNK_CRASHK 5
38 36  
39 37 struct mem_chunk {
40 38 unsigned long addr;
... ... @@ -47,8 +45,8 @@
47 45 extern unsigned long memory_end;
48 46  
49 47 void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
50   -void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
51   - unsigned long size, int type);
  48 +void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
  49 + unsigned long size);
52 50  
53 51 #define PRIMARY_SPACE_MODE 0
54 52 #define ACCESS_REGISTER_MODE 1
arch/s390/kernel/crash_dump.c
... ... @@ -89,7 +89,7 @@
89 89  
90 90 chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
91 91 detect_memory_layout(chunk_array, 0);
92   - create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK);
  92 + create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
93 93 return chunk_array;
94 94 }
95 95  
... ... @@ -344,7 +344,7 @@
344 344 for (i = 0; i < MEMORY_CHUNKS; i++) {
345 345 mem_chunk = &chunk_array[i];
346 346 if (mem_chunk->size == 0)
347   - break;
  347 + continue;
348 348 if (chunk_array[i].type != CHUNK_READ_WRITE &&
349 349 chunk_array[i].type != CHUNK_READ_ONLY)
350 350 continue;
arch/s390/kernel/setup.c
... ... @@ -463,14 +463,10 @@
463 463 for (i = 0; i < MEMORY_CHUNKS; i++) {
464 464 if (!memory_chunk[i].size)
465 465 continue;
466   - if (memory_chunk[i].type == CHUNK_OLDMEM ||
467   - memory_chunk[i].type == CHUNK_CRASHK)
468   - continue;
469 466 res = alloc_bootmem_low(sizeof(*res));
470 467 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
471 468 switch (memory_chunk[i].type) {
472 469 case CHUNK_READ_WRITE:
473   - case CHUNK_CRASHK:
474 470 res->name = "System RAM";
475 471 break;
476 472 case CHUNK_READ_ONLY:
... ... @@ -527,7 +523,7 @@
527 523 unsigned long align;
528 524  
529 525 chunk = &memory_chunk[i];
530   - if (chunk->type == CHUNK_OLDMEM)
  526 + if (!chunk->size)
531 527 continue;
532 528 align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
533 529 start = (chunk->addr + align - 1) & ~(align - 1);
... ... @@ -579,7 +575,7 @@
579 575 for (i = 0; i < MEMORY_CHUNKS; i++) {
580 576 struct mem_chunk *chunk = &memory_chunk[i];
581 577  
582   - if (chunk->type == CHUNK_OLDMEM)
  578 + if (!chunk->size)
583 579 continue;
584 580 if (chunk->addr >= memory_end) {
585 581 memset(chunk, 0, sizeof(*chunk));
... ... @@ -681,15 +677,6 @@
681 677 }
682 678  
683 679 /*
684   - * Reserve kdump memory by creating a memory hole in the mem_chunk array
685   - */
686   -static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
687   - int type)
688   -{
689   - create_mem_hole(memory_chunk, addr, size, type);
690   -}
691   -
692   -/*
693 680 * When kdump is enabled, we have to ensure that no memory from
694 681 * the area [0 - crashkernel memory size] and
695 682 * [crashk_res.start - crashk_res.end] is set offline.
... ... @@ -730,8 +717,8 @@
730 717  
731 718 real_size = max(real_size, chunk->addr + chunk->size);
732 719 }
733   - reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
734   - reserve_kdump_bootmem(OLDMEM_SIZE, real_size - OLDMEM_SIZE, CHUNK_OLDMEM);
  720 + create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE);
  721 + create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE);
735 722 if (OLDMEM_BASE + OLDMEM_SIZE == real_size)
736 723 saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
737 724 else
... ... @@ -774,7 +761,7 @@
774 761 crashk_res.start = crash_base;
775 762 crashk_res.end = crash_base + crash_size - 1;
776 763 insert_resource(&iomem_resource, &crashk_res);
777   - reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
  764 + create_mem_hole(memory_chunk, crash_base, crash_size);
778 765 pr_info("Reserving %lluMB of memory at %lluMB "
779 766 "for crashkernel (System RAM: %luMB)\n",
780 767 crash_size >> 20, crash_base >> 20, memory_end >> 20);
781 768  
... ... @@ -846,11 +833,10 @@
846 833 * Register RAM areas with the bootmem allocator.
847 834 */
848 835  
849   - for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
  836 + for (i = 0; i < MEMORY_CHUNKS; i++) {
850 837 unsigned long start_chunk, end_chunk, pfn;
851 838  
852   - if (memory_chunk[i].type != CHUNK_READ_WRITE &&
853   - memory_chunk[i].type != CHUNK_CRASHK)
  839 + if (!memory_chunk[i].size)
854 840 continue;
855 841 start_chunk = PFN_DOWN(memory_chunk[i].addr);
856 842 end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
arch/s390/mm/mem_detect.c
... ... @@ -95,82 +95,40 @@
95 95 EXPORT_SYMBOL(detect_memory_layout);
96 96  
97 97 /*
98   - * Move memory chunks array from index "from" to index "to"
  98 + * Create memory hole with given address and size.
99 99 */
100   -static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
  100 +void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
  101 + unsigned long size)
101 102 {
102   - int cnt = MEMORY_CHUNKS - to;
  103 + int i;
103 104  
104   - memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
105   -}
106   -
107   -/*
108   - * Initialize memory chunk
109   - */
110   -static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
111   - unsigned long size, int type)
112   -{
113   - chunk->type = type;
114   - chunk->addr = addr;
115   - chunk->size = size;
116   -}
117   -
118   -/*
119   - * Create memory hole with given address, size, and type
120   - */
121   -void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
122   - unsigned long size, int type)
123   -{
124   - unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
125   - int i, ch_type;
126   -
127 105 for (i = 0; i < MEMORY_CHUNKS; i++) {
128   - if (chunk[i].size == 0)
  106 + struct mem_chunk *chunk = &mem_chunk[i];
  107 +
  108 + if (chunk->size == 0)
129 109 continue;
  110 + if (addr > chunk->addr + chunk->size)
  111 + continue;
  112 + if (addr + size <= chunk->addr)
  113 + continue;
  114 + /* Split */
  115 + if ((addr > chunk->addr) &&
  116 + (addr + size < chunk->addr + chunk->size)) {
  117 + struct mem_chunk *new = chunk + 1;
130 118  
131   - /* Define chunk properties */
132   - ch_start = chunk[i].addr;
133   - ch_size = chunk[i].size;
134   - ch_end = ch_start + ch_size - 1;
135   - ch_type = chunk[i].type;
136   -
137   - /* Is memory chunk hit by memory hole? */
138   - if (addr + size <= ch_start)
139   - continue; /* No: memory hole in front of chunk */
140   - if (addr > ch_end)
141   - continue; /* No: memory hole after chunk */
142   -
143   - /* Yes: Define local hole properties */
144   - lh_start = max(addr, chunk[i].addr);
145   - lh_end = min(addr + size - 1, ch_end);
146   - lh_size = lh_end - lh_start + 1;
147   -
148   - if (lh_start == ch_start && lh_end == ch_end) {
149   - /* Hole covers complete memory chunk */
150   - mem_chunk_init(&chunk[i], lh_start, lh_size, type);
151   - } else if (lh_end == ch_end) {
152   - /* Hole starts in memory chunk and convers chunk end */
153   - mem_chunk_move(chunk, i + 1, i);
154   - mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
155   - ch_type);
156   - mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
157   - i += 1;
158   - } else if (lh_start == ch_start) {
159   - /* Hole ends in memory chunk */
160   - mem_chunk_move(chunk, i + 1, i);
161   - mem_chunk_init(&chunk[i], lh_start, lh_size, type);
162   - mem_chunk_init(&chunk[i + 1], lh_end + 1,
163   - ch_size - lh_size, ch_type);
164   - break;
165   - } else {
166   - /* Hole splits memory chunk */
167   - mem_chunk_move(chunk, i + 2, i);
168   - mem_chunk_init(&chunk[i], ch_start,
169   - lh_start - ch_start, ch_type);
170   - mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
171   - mem_chunk_init(&chunk[i + 2], lh_end + 1,
172   - ch_end - lh_end, ch_type);
173   - break;
  119 + memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new));
  120 + new->addr = addr + size;
  121 + new->size = chunk->addr + chunk->size - new->addr;
  122 + chunk->size = addr - chunk->addr;
  123 + continue;
  124 + } else if ((addr <= chunk->addr) &&
  125 + (addr + size >= chunk->addr + chunk->size)) {
  126 + memset(chunk, 0 , sizeof(*chunk));
  127 + } else if (addr + size < chunk->addr + chunk->size) {
  128 + chunk->size = chunk->addr + chunk->size - addr - size;
  129 + chunk->addr = addr + size;
  130 + } else if (addr > chunk->addr) {
  131 + chunk->size = addr - chunk->addr;
174 132 }
175 133 }
176 134 }
... ... @@ -375,9 +375,8 @@
375 375  
376 376 ro_start = PFN_ALIGN((unsigned long)&_stext);
377 377 ro_end = (unsigned long)&_eshared & PAGE_MASK;
378   - for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
379   - if (memory_chunk[i].type == CHUNK_CRASHK ||
380   - memory_chunk[i].type == CHUNK_OLDMEM)
  378 + for (i = 0; i < MEMORY_CHUNKS; i++) {
  379 + if (!memory_chunk[i].size)
381 380 continue;
382 381 start = memory_chunk[i].addr;
383 382 end = memory_chunk[i].addr + memory_chunk[i].size;
... ... @@ -411,9 +410,6 @@
411 410 mutex_lock(&vmem_mutex);
412 411 for (i = 0; i < MEMORY_CHUNKS; i++) {
413 412 if (!memory_chunk[i].size)
414   - continue;
415   - if (memory_chunk[i].type == CHUNK_CRASHK ||
416   - memory_chunk[i].type == CHUNK_OLDMEM)
417 413 continue;
418 414 seg = kzalloc(sizeof(*seg), GFP_KERNEL);
419 415 if (!seg)