arch/s390/kernel/mem_detect.c

  /*
   * Copyright IBM Corp. 2008, 2009
   *
   * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
   */
  
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <asm/ipl.h>
  #include <asm/sclp.h>
  #include <asm/setup.h>
  #define ADDR2G (1ULL << 31)
  
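  /*
   * Probe storage with tprot in steps of the storage increment size (rzm)
   * reported by the SCLP and merge consecutive increments with the same
   * access type into one chunk. Only read/write and read-only ranges are
   * recorded; the scan ends once the reported memory size is reached or
   * the chunk array is full.
   */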
  static void find_memory_chunks(struct mem_chunk chunk[])
  {
  	unsigned long long memsize, rnmax, rzm;
  	unsigned long addr = 0, size;
  	int i = 0, type;
  
  	rzm = sclp_get_rzm();
  	rnmax = sclp_get_rnmax();
  	memsize = rzm * rnmax;
  	if (!rzm)
  		rzm = 1ULL << 17;
  	if (sizeof(long) == 4) {
  		rzm = min(ADDR2G, rzm);
  		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
  	}
  	do {
  		size = 0;
  		type = tprot(addr);
  		do {
  			size += rzm;
  			if (memsize && addr + size >= memsize)
  				break;
  		} while (type == tprot(addr + size));
  		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
  			chunk[i].addr = addr;
  			chunk[i].size = size;
  			chunk[i].type = type;
  			i++;
  		}
  		addr += size;
  	} while (addr < memsize && i < MEMORY_CHUNKS);
  }
  
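  /*
   * Fill chunk[] with the detected memory layout. tprot only yields
   * meaningful results with DAT and low address protection disabled,
   * so both are switched off (together with IRQs) around the scan.
   */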
  void detect_memory_layout(struct mem_chunk chunk[])
  {
  	unsigned long flags, cr0;
  
  	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
  	/* Disable IRQs, DAT and low address protection so tprot does the
  	 * right thing and we don't get scheduled away with low address
  	 * protection disabled.
  	 */
  	flags = __arch_local_irq_stnsm(0xf8);
  	__ctl_store(cr0, 0, 0);
  	__ctl_clear_bit(0, 28);
  	find_memory_chunks(chunk);
  	__ctl_load(cr0, 0, 0);
  	arch_local_irq_restore(flags);
  }
  EXPORT_SYMBOL(detect_memory_layout);
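
  /*
   * Minimal usage sketch (illustrative only, not part of this file):
   * a caller detects the layout into a local array and walks the
   * populated entries. struct mem_chunk and MEMORY_CHUNKS come from
   * <asm/setup.h>.
   *
   *	struct mem_chunk chunks[MEMORY_CHUNKS];
   *	int i;
   *
   *	detect_memory_layout(chunks);
   *	for (i = 0; i < MEMORY_CHUNKS; i++) {
   *		if (chunks[i].size == 0)
   *			continue;
   *		pr_info("chunk %d: addr=0x%lx size=0x%lx type=%d\n",
   *			i, chunks[i].addr, chunks[i].size, chunks[i].type);
   *	}
   */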
  
  /*
   * Move memory chunks array from index "from" to index "to"
   */
  static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
  {
  	int cnt = MEMORY_CHUNKS - to;
  
  	memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
  }
  
  /*
   * Initialize memory chunk
   */
  static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
  			   unsigned long size, int type)
  {
  	chunk->type = type;
  	chunk->addr = addr;
  	chunk->size = size;
  }
  
  /*
   * Create memory hole with given address, size, and type
   */
  void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
  		     unsigned long size, int type)
  {
  	unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
  	int i, ch_type;
  
  	for (i = 0; i < MEMORY_CHUNKS; i++) {
  		if (chunk[i].size == 0)
  			continue;
  
  		/* Define chunk properties */
  		ch_start = chunk[i].addr;
  		ch_size = chunk[i].size;
  		ch_end = ch_start + ch_size - 1;
  		ch_type = chunk[i].type;
  
  		/* Is memory chunk hit by memory hole? */
  		if (addr + size <= ch_start)
  			continue; /* No: memory hole in front of chunk */
  		if (addr > ch_end)
  			continue; /* No: memory hole after chunk */
  
  		/* Yes: Define local hole properties */
  		lh_start = max(addr, chunk[i].addr);
  		lh_end = min(addr + size - 1, ch_end);
  		lh_size = lh_end - lh_start + 1;
  
  		if (lh_start == ch_start && lh_end == ch_end) {
  			/* Hole covers complete memory chunk */
  			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
  		} else if (lh_end == ch_end) {
  			/* Hole starts in memory chunk and covers chunk end */
  			mem_chunk_move(chunk, i + 1, i);
  			mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
  				       ch_type);
  			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
  			i += 1;
  		} else if (lh_start == ch_start) {
  			/* Hole ends in memory chunk */
  			mem_chunk_move(chunk, i + 1, i);
  			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
  			mem_chunk_init(&chunk[i + 1], lh_end + 1,
  				       ch_size - lh_size, ch_type);
  			break;
  		} else {
  			/* Hole splits memory chunk */
  			mem_chunk_move(chunk, i + 2, i);
  			mem_chunk_init(&chunk[i], ch_start,
  				       lh_start - ch_start, ch_type);
  			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
  			mem_chunk_init(&chunk[i + 2], lh_end + 1,
  				       ch_end - lh_end, ch_type);
  			break;
  		}
  	}
  }
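
  /*
   * Worked example (hypothetical numbers): starting from a single 1 GiB
   * CHUNK_READ_WRITE chunk at address 0,
   *
   *	create_mem_hole(chunk, 0x10000000, 0x10000000, CHUNK_CRASHK);
   *
   * takes the "hole splits memory chunk" branch above and leaves three
   * entries (CHUNK_CRASHK serves here only as an example hole type):
   *
   *	chunk[0]: addr=0x00000000 size=0x10000000 type=CHUNK_READ_WRITE
   *	chunk[1]: addr=0x10000000 size=0x10000000 type=CHUNK_CRASHK
   *	chunk[2]: addr=0x20000000 size=0x20000000 type=CHUNK_READ_WRITE
   */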