arch/x86/mm/init_32.c

/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
  #include <linux/module.h>
  #include <linux/signal.h>
  #include <linux/sched.h>
  #include <linux/kernel.h>
  #include <linux/errno.h>
  #include <linux/string.h>
  #include <linux/types.h>
  #include <linux/ptrace.h>
  #include <linux/mman.h>
  #include <linux/mm.h>
  #include <linux/hugetlb.h>
  #include <linux/swap.h>
  #include <linux/smp.h>
  #include <linux/init.h>
  #include <linux/highmem.h>
  #include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
  #include <asm/processor.h>
  #include <asm/system.h>
  #include <asm/uaccess.h>
  #include <asm/pgtable.h>
  #include <asm/dma.h>
  #include <asm/fixmap.h>
  #include <asm/e820.h>
  #include <asm/apic.h>
#include <asm/bugs.h>
  #include <asm/tlb.h>
  #include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/init.h>

  unsigned long highstart_pfn, highend_pfn;
  static noinline int do_test_wp_bit(void);

  bool __read_mostly __vmalloc_start_set = false;
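
/*
 * Hand out one zeroed page for early page-table construction from the
 * pgt_buf range reserved by the boot code; only usable before the
 * bootmem allocator takes over (see after_bootmem).
 */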
  static __init void *alloc_low_page(void)
  {
  	unsigned long pfn = pgt_buf_end++;
  	void *adr;
  	if (pfn >= pgt_buf_top)
  		panic("alloc_low_page: ran out of memory");
  
  	adr = __va(pfn * PAGE_SIZE);
  	clear_page(adr);
  	return adr;
  }
  /*
   * Creates a middle page table and puts a pointer to it in the
   * given global directory entry. This only returns the gd entry
   * in non-PAE compilation mode, since the middle layer is folded.
   */
  static pmd_t * __init one_md_table_init(pgd_t *pgd)
  {
  	pud_t *pud;
  	pmd_t *pmd_table;

  #ifdef CONFIG_X86_PAE
  	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
  		if (after_bootmem)
  			pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
  		else
  			pmd_table = (pmd_t *)alloc_low_page();
  		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
  		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
  		pud = pud_offset(pgd, 0);
  		BUG_ON(pmd_table != pmd_offset(pud, 0));
  
  		return pmd_table;
  	}
  #endif
  	pud = pud_offset(pgd, 0);
  	pmd_table = pmd_offset(pud, 0);

  	return pmd_table;
  }
  
  /*
   * Create a page table and place a pointer to it in a middle page
   * directory entry:
   */
  static pte_t * __init one_page_table_init(pmd_t *pmd)
  {
  	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
  		pte_t *page_table = NULL;
  		if (after_bootmem) {
  #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
  			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
  #endif
  			if (!page_table)
  				page_table =
  				(pte_t *)alloc_bootmem_pages(PAGE_SIZE);
  		} else
  			page_table = (pte_t *)alloc_low_page();

  		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
  		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
  		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
  	}

  	return pte_offset_kernel(pmd, 0);
  }
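
/*
 * Make sure the pmd covering @vaddr in swapper_pg_dir exists and return a
 * pointer to its entry; populate_extra_pte() below does the same one level
 * further down and returns the pte slot itself.
 */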
  pmd_t * __init populate_extra_pmd(unsigned long vaddr)
  {
  	int pgd_idx = pgd_index(vaddr);
  	int pmd_idx = pmd_index(vaddr);
  
  	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
  }
  
  pte_t * __init populate_extra_pte(unsigned long vaddr)
  {
  	int pte_idx = pte_index(vaddr);
  	pmd_t *pmd;
  	pmd = populate_extra_pmd(vaddr);
  	return one_page_table_init(pmd) + pte_idx;
  }
  static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
  					   unsigned long vaddr, pte_t *lastpte)
  {
  #ifdef CONFIG_HIGHMEM
  	/*
  	 * Something (early fixmap) may already have put a pte
  	 * page here, which causes the page table allocation
  	 * to become nonlinear. Attempt to fix it, and if it
  	 * is still nonlinear then we have to bug.
  	 */
  	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
  	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
  
  	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
  	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
  	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
  	    && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
  		|| (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
  		pte_t *newpte;
  		int i;
  		BUG_ON(after_bootmem);
  		newpte = alloc_low_page();
  		for (i = 0; i < PTRS_PER_PTE; i++)
  			set_pte(newpte + i, pte[i]);
  
  		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
  		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
  		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
  		__flush_tlb_all();
  
  		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
  		pte = newpte;
  	}
  	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
  	       && vaddr > fix_to_virt(FIX_KMAP_END)
  	       && lastpte && lastpte + PTRS_PER_PTE != pte);
  #endif
  	return pte;
  }
  /*
   * This function initializes a certain range of kernel virtual memory
   * with new bootmem page tables, everywhere page tables are missing in
   * the given range.
   *
 * NOTE: The pagetables are allocated contiguously in physical space,
   * so we can cache the place of the first one and move around without
   * checking the pgd every time.
   */
  static void __init
  page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
  {
  	int pgd_idx, pmd_idx;
  	unsigned long vaddr;
  	pgd_t *pgd;
  	pmd_t *pmd;
  	pte_t *pte = NULL;
  
  	vaddr = start;
  	pgd_idx = pgd_index(vaddr);
  	pmd_idx = pmd_index(vaddr);
  	pgd = pgd_base + pgd_idx;
  
  	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
  		pmd = one_md_table_init(pgd);
  		pmd = pmd + pmd_index(vaddr);
  		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
  							pmd++, pmd_idx++) {
  			pte = page_table_kmap_check(one_page_table_init(pmd),
  			                            pmd, vaddr, pte);
  
  			vaddr += PMD_SIZE;
  		}
  		pmd_idx = 0;
  	}
  }
  
  static inline int is_kernel_text(unsigned long addr)
  {
  	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
  		return 1;
  	return 0;
  }
  
  /*
   * This maps the physical memory to kernel virtual address space, a total
   * of max_low_pfn pages, by creating page tables starting from address
   * PAGE_OFFSET:
   */
  unsigned long __init
  kernel_physical_mapping_init(unsigned long start,
  			     unsigned long end,
  			     unsigned long page_size_mask)
  {
  	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
  	unsigned long last_map_addr = end;
  	unsigned long start_pfn, end_pfn;
  	pgd_t *pgd_base = swapper_pg_dir;
  	int pgd_idx, pmd_idx, pte_ofs;
  	unsigned long pfn;
  	pgd_t *pgd;
  	pmd_t *pmd;
  	pte_t *pte;
  	unsigned pages_2m, pages_4k;
  	int mapping_iter;
  	start_pfn = start >> PAGE_SHIFT;
  	end_pfn = end >> PAGE_SHIFT;
  	/*
  	 * First iteration will setup identity mapping using large/small pages
  	 * based on use_pse, with other attributes same as set by
  	 * the early code in head_32.S
  	 *
  	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
  	 * as desired for the kernel identity mapping.
  	 *
  	 * This two pass mechanism conforms to the TLB app note which says:
  	 *
  	 *     "Software should not write to a paging-structure entry in a way
  	 *      that would change, for any linear address, both the page size
  	 *      and either the page frame or attributes."
  	 */
  	mapping_iter = 1;

  	if (!cpu_has_pse)
  		use_pse = 0;

  repeat:
  	pages_2m = pages_4k = 0;
  	pfn = start_pfn;
  	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
  	pgd = pgd_base + pgd_idx;
  	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
  		pmd = one_md_table_init(pgd);

  		if (pfn >= end_pfn)
  			continue;
  #ifdef CONFIG_X86_PAE
  		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
  		pmd += pmd_idx;
  #else
  		pmd_idx = 0;
  #endif
  		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
  		     pmd++, pmd_idx++) {
  			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

  			/*
  			 * Map with big pages if possible, otherwise
  			 * create normal page tables:
  			 */
  			if (use_pse) {
  				unsigned int addr2;
  				pgprot_t prot = PAGE_KERNEL_LARGE;
  				/*
  				 * first pass will use the same initial
  				 * identity mapping attribute + _PAGE_PSE.
  				 */
  				pgprot_t init_prot =
  					__pgprot(PTE_IDENT_ATTR |
  						 _PAGE_PSE);

  				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
  					PAGE_OFFSET + PAGE_SIZE-1;
  				if (is_kernel_text(addr) ||
  				    is_kernel_text(addr2))
  					prot = PAGE_KERNEL_LARGE_EXEC;
  				pages_2m++;
  				if (mapping_iter == 1)
  					set_pmd(pmd, pfn_pmd(pfn, init_prot));
  				else
  					set_pmd(pmd, pfn_pmd(pfn, prot));

  				pfn += PTRS_PER_PTE;
  				continue;
  			}
  			pte = one_page_table_init(pmd);

  			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
  			pte += pte_ofs;
  			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
  			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
  				pgprot_t prot = PAGE_KERNEL;
  				/*
  				 * first pass will use the same initial
  				 * identity mapping attribute.
  				 */
  				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

  				if (is_kernel_text(addr))
  					prot = PAGE_KERNEL_EXEC;

  				pages_4k++;
  				if (mapping_iter == 1) {
  					set_pte(pte, pfn_pte(pfn, init_prot));
  					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
  				} else
  					set_pte(pte, pfn_pte(pfn, prot));
  			}
  		}
  	}
  	if (mapping_iter == 1) {
  		/*
  		 * update direct mapping page count only in the first
  		 * iteration.
  		 */
  		update_page_count(PG_LEVEL_2M, pages_2m);
  		update_page_count(PG_LEVEL_4K, pages_4k);
  
  		/*
  		 * local global flush tlb, which will flush the previous
  		 * mappings present in both small and large page TLB's.
  		 */
  		__flush_tlb_all();
  
  		/*
  		 * Second iteration will set the actual desired PTE attributes.
  		 */
  		mapping_iter = 2;
  		goto repeat;
  	}
  	return last_map_addr;
  }
  pte_t *kmap_pte;
  pgprot_t kmap_prot;
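
/* Walk the kernel page tables and return the pte that maps @vaddr. */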
  static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
  {
  	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
  			vaddr), vaddr), vaddr);
  }
  
  static void __init kmap_init(void)
  {
  	unsigned long kmap_vstart;
  	/*
  	 * Cache the first kmap pte:
  	 */
  	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
  	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
  
  	kmap_prot = PAGE_KERNEL;
  }
  #ifdef CONFIG_HIGHMEM
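/*
 * Create the page table backing the persistent kmap area (PKMAP_BASE) and
 * remember it in pkmap_page_table for kmap()/kunmap().
 */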
  static void __init permanent_kmaps_init(pgd_t *pgd_base)
  {
  	unsigned long vaddr;
  	pgd_t *pgd;
  	pud_t *pud;
  	pmd_t *pmd;
  	pte_t *pte;
  
  	vaddr = PKMAP_BASE;
  	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
  
  	pgd = swapper_pg_dir + pgd_index(vaddr);
  	pud = pud_offset(pgd, vaddr);
  	pmd = pmd_offset(pud, vaddr);
  	pte = pte_offset_kernel(pmd, vaddr);
  	pkmap_page_table = pte;
  }
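
/* Release one highmem page to the buddy allocator during boot. */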
  static void __init add_one_highpage_init(struct page *page)
  {
  	ClearPageReserved(page);
  	init_page_count(page);
  	__free_page(page);
  	totalhigh_pages++;
  }
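
/*
 * Feed every valid highmem page in [start_pfn, end_pfn) that memblock
 * reports as free memory to the buddy allocator.
 */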
  void __init add_highpages_with_active_regions(int nid,
  			 unsigned long start_pfn, unsigned long end_pfn)
  {
  	phys_addr_t start, end;
  	u64 i;
  
  	for_each_free_mem_range(i, nid, &start, &end, NULL) {
  		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
  					    start_pfn, end_pfn);
  		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
  					      start_pfn, end_pfn);
  		for ( ; pfn < e_pfn; pfn++)
  			if (pfn_valid(pfn))
  				add_one_highpage_init(pfn_to_page(pfn));
  	}
  }
  #else
  static inline void permanent_kmaps_init(pgd_t *pgd_base)
  {
  }
  #endif /* CONFIG_HIGHMEM */
  void __init native_pagetable_setup_start(pgd_t *base)
  {
  	unsigned long pfn, va;
  	pgd_t *pgd;
  	pud_t *pud;
  	pmd_t *pmd;
  	pte_t *pte;
  
  	/*
  	 * Remove any mappings which extend past the end of physical
  	 * memory from the boot time page table:
  	 */
  	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
  		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
  		pgd = base + pgd_index(va);
  		if (!pgd_present(*pgd))
  			break;
  
  		pud = pud_offset(pgd, va);
  		pmd = pmd_offset(pud, va);
  		if (!pmd_present(*pmd))
  			break;
  
  		pte = pte_offset_kernel(pmd, va);
  		if (!pte_present(*pte))
  			break;
  
  		pte_clear(NULL, va, pte);
  	}
  	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
  }
  
  void __init native_pagetable_setup_done(pgd_t *base)
  {
  }
  
  /*
   * Build a proper pagetable for the kernel mappings.  Up until this
   * point, we've been running on some set of pagetables constructed by
   * the boot process.
   *
   * If we're booting on native hardware, this will be a pagetable
   * constructed in arch/x86/kernel/head_32.S.  The root of the
   * pagetable will be swapper_pg_dir.
   *
   * If we're booting paravirtualized under a hypervisor, then there are
   * more options: we may already be running PAE, and the pagetable may
   * or may not be based in swapper_pg_dir.  In any case,
   * paravirt_pagetable_setup_start() will set up swapper_pg_dir
   * appropriately for the rest of the initialization to work.
   *
   * In general, pagetable_init() assumes that the pagetable may already
   * be partially populated, and so it avoids stomping on any existing
   * mappings.
   */
  void __init early_ioremap_page_table_range_init(void)
  {
  	pgd_t *pgd_base = swapper_pg_dir;
  	unsigned long vaddr, end;

  	/*
  	 * Fixed mappings, only the page table structure has to be
  	 * created - mappings will be set by set_fixmap():
  	 */
  	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
  	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
  	page_table_range_init(vaddr, end, pgd_base);
  	early_ioremap_reset();
  }
  
  static void __init pagetable_init(void)
  {
  	pgd_t *pgd_base = swapper_pg_dir;
  	permanent_kmaps_init(pgd_base);
  }
  pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
  EXPORT_SYMBOL_GPL(__supported_pte_mask);
  /* user-defined highmem size */
  static unsigned int highmem_pages = -1;
  
  /*
   * highmem=size forces highmem to be exactly 'size' bytes.
   * This works even on boxes that have no highmem otherwise.
   * This also works to reduce highmem size on bigger boxes.
   */
  static int __init parse_highmem(char *arg)
  {
  	if (!arg)
  		return -EINVAL;
  
  	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
  	return 0;
  }
  early_param("highmem", parse_highmem);
#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
  /*
   * All of RAM fits into lowmem - but if user wants highmem
   * artificially via the highmem=x boot parameter then create
   * it:
   */
  void __init lowmem_pfn_init(void)
  {
  	/* max_low_pfn is 0, we already have early_res support */
  	max_low_pfn = max_pfn;

  	if (highmem_pages == -1)
  		highmem_pages = 0;
  #ifdef CONFIG_HIGHMEM
  	if (highmem_pages >= max_pfn) {
  		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
  			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
  		highmem_pages = 0;
  	}
  	if (highmem_pages) {
  		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
  			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
  				pages_to_mb(highmem_pages));
  			highmem_pages = 0;
  		}
  		max_low_pfn -= highmem_pages;
  	}
  #else
  	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
  #endif
  }
  
#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
  
#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
  /*
   * We have more RAM than fits into lowmem - we try to put it into
   * highmem, also taking the highmem=x boot parameter into account:
   */
  void __init highmem_pfn_init(void)
  {
  	max_low_pfn = MAXMEM_PFN;
  	if (highmem_pages == -1)
  		highmem_pages = max_pfn - MAXMEM_PFN;
  
  	if (highmem_pages + MAXMEM_PFN < max_pfn)
  		max_pfn = MAXMEM_PFN + highmem_pages;
  
  	if (highmem_pages + MAXMEM_PFN > max_pfn) {
  		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
  			pages_to_mb(max_pfn - MAXMEM_PFN),
  			pages_to_mb(highmem_pages));
  		highmem_pages = 0;
  	}
  #ifndef CONFIG_HIGHMEM
  	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
  	max_pfn = MAXMEM_PFN;
  #else /* !CONFIG_HIGHMEM */
  #ifndef CONFIG_HIGHMEM64G
  	if (max_pfn > MAX_NONPAE_PFN) {
  		max_pfn = MAX_NONPAE_PFN;
  		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
  	}
  #endif /* !CONFIG_HIGHMEM64G */
  #endif /* !CONFIG_HIGHMEM */
  }
  
  /*
   * Determine low and high memory ranges:
   */
  void __init find_low_pfn_range(void)
  {
  	/* it could update max_pfn */
  	if (max_pfn <= MAXMEM_PFN)
  		lowmem_pfn_init();
  	else
  		highmem_pfn_init();
  }
  #ifndef CONFIG_NEED_MULTIPLE_NODES
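/*
 * Flat (non-NUMA) setup: register all memory with node 0, record the
 * lowmem/highmem split and report it.
 */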
  void __init initmem_init(void)
  {
  #ifdef CONFIG_HIGHMEM
  	highstart_pfn = highend_pfn = max_pfn;
  	if (max_pfn > max_low_pfn)
  		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
  		pages_to_mb(highend_pfn - highstart_pfn));
  	num_physpages = highend_pfn;
  	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
  #else
  	num_physpages = max_low_pfn;
  	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
  #endif
  
  	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
  	sparse_memory_present_with_active_regions(0);
  #ifdef CONFIG_FLATMEM
  	max_mapnr = num_physpages;
  #endif
  	__vmalloc_start_set = true;
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
  			pages_to_mb(max_low_pfn));
  
  	setup_bootmem_allocator();
  }
  #endif /* !CONFIG_NEED_MULTIPLE_NODES */
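
/*
 * With memblock-based early allocations there is no bootmem bitmap to set
 * up any more; just report the mapped ranges and switch later page-table
 * allocations away from the early pgt_buf pool (after_bootmem).
 */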
  void __init setup_bootmem_allocator(void)
  {
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
  		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

  	after_bootmem = 1;
  }
  /*
   * paging_init() sets up the page tables - note that the first 8MB are
   * already mapped by head.S.
   *
 * This routine also unmaps the page at virtual kernel address 0, so
   * that we can trap those pesky NULL-reference errors in the kernel.
   */
  void __init paging_init(void)
  {
  	pagetable_init();
  	__flush_tlb_all();
  
  	kmap_init();
  
  	/*
  	 * NOTE: at this point the bootmem allocator is fully available.
  	 */
  	olpc_dt_build_devicetree();
  	sparse_memory_present_with_active_regions(MAX_NUMNODES);
  	sparse_init();
  	zone_sizes_init();
  }
  
  /*
   * Test if the WP bit works in supervisor mode. It isn't supported on 386's
   * and also on some strange 486's. All 586+'s are OK. This used to involve
   * black magic jumps to work around some nasty CPU bugs, but fortunately the
   * switch to using exceptions got rid of all that.
   */
  static void __init test_wp_bit(void)
  {
  	printk(KERN_INFO
    "Checking if this processor honours the WP bit even in supervisor mode...");
  
  	/* Any page-aligned address will do, the test is non-destructive */
  	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
  	boot_cpu_data.wp_works_ok = do_test_wp_bit();
  	clear_fixmap(FIX_WP_TEST);
  
  	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
  #ifdef CONFIG_X86_WP_WORKS_OK
  		panic(
    "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
  #endif
  	} else {
		printk(KERN_CONT "Ok.\n");
  	}
  }
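
/*
 * Late boot memory accounting: hand all remaining low and high memory to
 * the buddy allocator, print the memory and virtual layout summary, and
 * sanity-check the 32-bit address-space layout.
 */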
  void __init mem_init(void)
  {
  	int codesize, reservedpages, datasize, initsize;
  	int tmp;

  	pci_iommu_alloc();
  #ifdef CONFIG_FLATMEM
  	BUG_ON(!mem_map);
  #endif
	/*
	 * With CONFIG_DEBUG_PAGEALLOC, initialization of highmem pages has to
	 * be done before free_all_bootmem(). Memblock uses free low memory for
	 * temporary data (see find_range_array()) and for this purpose can use
	 * pages that were already passed to the buddy allocator, and hence are
	 * marked as not accessible in the page tables when compiled with
	 * CONFIG_DEBUG_PAGEALLOC. Otherwise the order of initialization is not
	 * important here.
	 */
  	set_highmem_pages_init();
  	/* this will put all low memory onto the freelists */
  	totalram_pages += free_all_bootmem();
  
  	reservedpages = 0;
  	for (tmp = 0; tmp < max_low_pfn; tmp++)
  		/*
  		 * Only count reserved RAM pages:
  		 */
  		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
  			reservedpages++;
  	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
  	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
  	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
  		nr_free_pages() << (PAGE_SHIFT-10),
  		num_physpages << (PAGE_SHIFT-10),
  		codesize >> 10,
  		reservedpages << (PAGE_SHIFT-10),
  		datasize >> 10,
  		initsize >> 10,
  		totalhigh_pages << (PAGE_SHIFT-10));

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
  #ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
  #endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
  		FIXADDR_START, FIXADDR_TOP,
  		(FIXADDR_TOP - FIXADDR_START) >> 10,
  
  #ifdef CONFIG_HIGHMEM
  		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
  		(LAST_PKMAP*PAGE_SIZE) >> 10,
  #endif
  		VMALLOC_START, VMALLOC_END,
  		(VMALLOC_END - VMALLOC_START) >> 20,

  		(unsigned long)__va(0), (unsigned long)high_memory,
  		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

788
  		(unsigned long)&__init_begin, (unsigned long)&__init_end,
  		((unsigned long)&__init_end -
  		 (unsigned long)&__init_begin) >> 10,

  		(unsigned long)&_etext, (unsigned long)&_edata,
  		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

  		(unsigned long)&_text, (unsigned long)&_etext,
  		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

  	/*
  	 * Check boundaries twice: Some fundamental inconsistencies can
  	 * be detected at build time already.
  	 */
  #define __FIXADDR_TOP (-PAGE_SIZE)
  #ifdef CONFIG_HIGHMEM
  	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
  	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
  #endif
  #define high_memory (-128UL << 20)
  	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
  #undef high_memory
  #undef __FIXADDR_TOP
  #ifdef CONFIG_HIGHMEM
  	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
  	BUG_ON(VMALLOC_END				> PKMAP_BASE);
  #endif
  	BUG_ON(VMALLOC_START				>= VMALLOC_END);
  	BUG_ON((unsigned long)high_memory		> VMALLOC_START);

  	if (boot_cpu_data.wp_works_ok < 0)
  		test_wp_bit();
  }
  #ifdef CONFIG_MEMORY_HOTPLUG
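/* Memory hotplug entry point; hot-added ranges land in ZONE_HIGHMEM on 32-bit. */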
  int arch_add_memory(int nid, u64 start, u64 size)
  {
  	struct pglist_data *pgdata = NODE_DATA(nid);
  	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
  	unsigned long start_pfn = start >> PAGE_SHIFT;
  	unsigned long nr_pages = size >> PAGE_SHIFT;
  	return __add_pages(nid, zone, start_pfn, nr_pages);
  }
  #endif

  /*
   * This function cannot be __init, since exceptions don't work in that
   * section.  Put this after the callers, so that it cannot be inlined.
   */
  static noinline int do_test_wp_bit(void)
  {
  	char tmp_reg;
  	int flag;
  
  	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
  		_ASM_EXTABLE(1b,2b)
  		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
  		 "=q" (tmp_reg),
  		 "=r" (flag)
  		:"2" (1)
  		:"memory");

  	return flag;
  }
  #ifdef CONFIG_DEBUG_RODATA
  const int rodata_test_data = 0xC3;
  EXPORT_SYMBOL_GPL(rodata_test_data);

  int kernel_set_to_readonly __read_mostly;
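
/*
 * Toggle the kernel text mapping between read-write and read-only once
 * kernel_set_to_readonly is set; used by code-patching paths (e.g. ftrace)
 * that need to write to the text area briefly.
 */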
  void set_kernel_text_rw(void)
  {
  	unsigned long start = PFN_ALIGN(_text);
  	unsigned long size = PFN_ALIGN(_etext) - start;
  
  	if (!kernel_set_to_readonly)
  		return;
  
	pr_debug("Set kernel text: %lx - %lx for read write\n",
  		 start, start+size);
  
  	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
  }
  
  void set_kernel_text_ro(void)
  {
  	unsigned long start = PFN_ALIGN(_text);
  	unsigned long size = PFN_ALIGN(_etext) - start;
  
  	if (!kernel_set_to_readonly)
  		return;
  
	pr_debug("Set kernel text: %lx - %lx for read only\n",
  		 start, start+size);
  
  	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
  }
  static void mark_nxdata_nx(void)
  {
  	/*
	 * When this is called, init has already been executed and released,
  	 * so everything past _etext should be NX.
  	 */
  	unsigned long start = PFN_ALIGN(_etext);
  	/*
  	 * This comes from is_kernel_text upper limit. Also HPAGE where used:
  	 */
  	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
  
  	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
  	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
  }
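
/*
 * Write-protect the kernel text and read-only data, then mark everything
 * past _etext non-executable via mark_nxdata_nx().
 */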
  void mark_rodata_ro(void)
  {
  	unsigned long start = PFN_ALIGN(_text);
  	unsigned long size = PFN_ALIGN(_etext) - start;

  	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
  		size >> 10);

  	kernel_set_to_readonly = 1;
  #ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
  		start, start+size);
  	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
  	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
  #endif

  	start += size;
  	size = (unsigned long)__end_rodata - start;
  	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
  		size >> 10);
  	rodata_test();

  #ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
  	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
  	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
  #endif
  	mark_nxdata_nx();
  }
  #endif