Commit f3beeb4a51d3aa16571e6c3774b929209e9956bf
Committed by
Linus Torvalds
1 parent
69afade72a
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
mm/alpha: use common helper functions to free reserved pages
Use common helper functions to free reserved pages. Also include <asm/sections.h> to avoid local declarations. Signed-off-by: Jiang Liu <jiang.liu@huawei.com> Cc: Richard Henderson <rth@twiddle.net> Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru> Cc: Matt Turner <mattst88@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 3 changed files with 6 additions and 26 deletions Inline Diff
arch/alpha/kernel/sys_nautilus.c
1 | /* | 1 | /* |
2 | * linux/arch/alpha/kernel/sys_nautilus.c | 2 | * linux/arch/alpha/kernel/sys_nautilus.c |
3 | * | 3 | * |
4 | * Copyright (C) 1995 David A Rusling | 4 | * Copyright (C) 1995 David A Rusling |
5 | * Copyright (C) 1998 Richard Henderson | 5 | * Copyright (C) 1998 Richard Henderson |
6 | * Copyright (C) 1999 Alpha Processor, Inc., | 6 | * Copyright (C) 1999 Alpha Processor, Inc., |
7 | * (David Daniel, Stig Telfer, Soohoon Lee) | 7 | * (David Daniel, Stig Telfer, Soohoon Lee) |
8 | * | 8 | * |
9 | * Code supporting NAUTILUS systems. | 9 | * Code supporting NAUTILUS systems. |
10 | * | 10 | * |
11 | * | 11 | * |
12 | * NAUTILUS has the following I/O features: | 12 | * NAUTILUS has the following I/O features: |
13 | * | 13 | * |
14 | * a) Driven by AMD 751 aka IRONGATE (northbridge): | 14 | * a) Driven by AMD 751 aka IRONGATE (northbridge): |
15 | * 4 PCI slots | 15 | * 4 PCI slots |
16 | * 1 AGP slot | 16 | * 1 AGP slot |
17 | * | 17 | * |
18 | * b) Driven by ALI M1543C (southbridge) | 18 | * b) Driven by ALI M1543C (southbridge) |
19 | * 2 ISA slots | 19 | * 2 ISA slots |
20 | * 2 IDE connectors | 20 | * 2 IDE connectors |
21 | * 1 dual drive capable FDD controller | 21 | * 1 dual drive capable FDD controller |
22 | * 2 serial ports | 22 | * 2 serial ports |
23 | * 1 ECP/EPP/SP parallel port | 23 | * 1 ECP/EPP/SP parallel port |
24 | * 2 USB ports | 24 | * 2 USB ports |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
31 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/reboot.h> | 33 | #include <linux/reboot.h> |
34 | #include <linux/bootmem.h> | 34 | #include <linux/bootmem.h> |
35 | #include <linux/bitops.h> | 35 | #include <linux/bitops.h> |
36 | 36 | ||
37 | #include <asm/ptrace.h> | 37 | #include <asm/ptrace.h> |
38 | #include <asm/dma.h> | 38 | #include <asm/dma.h> |
39 | #include <asm/irq.h> | 39 | #include <asm/irq.h> |
40 | #include <asm/mmu_context.h> | 40 | #include <asm/mmu_context.h> |
41 | #include <asm/io.h> | 41 | #include <asm/io.h> |
42 | #include <asm/pci.h> | 42 | #include <asm/pci.h> |
43 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
44 | #include <asm/core_irongate.h> | 44 | #include <asm/core_irongate.h> |
45 | #include <asm/hwrpb.h> | 45 | #include <asm/hwrpb.h> |
46 | #include <asm/tlbflush.h> | 46 | #include <asm/tlbflush.h> |
47 | 47 | ||
48 | #include "proto.h" | 48 | #include "proto.h" |
49 | #include "err_impl.h" | 49 | #include "err_impl.h" |
50 | #include "irq_impl.h" | 50 | #include "irq_impl.h" |
51 | #include "pci_impl.h" | 51 | #include "pci_impl.h" |
52 | #include "machvec_impl.h" | 52 | #include "machvec_impl.h" |
53 | 53 | ||
54 | 54 | ||
55 | static void __init | 55 | static void __init |
56 | nautilus_init_irq(void) | 56 | nautilus_init_irq(void) |
57 | { | 57 | { |
58 | if (alpha_using_srm) { | 58 | if (alpha_using_srm) { |
59 | alpha_mv.device_interrupt = srm_device_interrupt; | 59 | alpha_mv.device_interrupt = srm_device_interrupt; |
60 | } | 60 | } |
61 | 61 | ||
62 | init_i8259a_irqs(); | 62 | init_i8259a_irqs(); |
63 | common_init_isa_dma(); | 63 | common_init_isa_dma(); |
64 | } | 64 | } |
65 | 65 | ||
66 | static int __init | 66 | static int __init |
67 | nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 67 | nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
68 | { | 68 | { |
69 | /* Preserve the IRQ set up by the console. */ | 69 | /* Preserve the IRQ set up by the console. */ |
70 | 70 | ||
71 | u8 irq; | 71 | u8 irq; |
72 | /* UP1500: AGP INTA is actually routed to IRQ 5, not IRQ 10 as | 72 | /* UP1500: AGP INTA is actually routed to IRQ 5, not IRQ 10 as |
73 | console reports. Check the device id of AGP bridge to distinguish | 73 | console reports. Check the device id of AGP bridge to distinguish |
74 | UP1500 from UP1000/1100. Note: 'pin' is 2 due to bridge swizzle. */ | 74 | UP1500 from UP1000/1100. Note: 'pin' is 2 due to bridge swizzle. */ |
75 | if (slot == 1 && pin == 2 && | 75 | if (slot == 1 && pin == 2 && |
76 | dev->bus->self && dev->bus->self->device == 0x700f) | 76 | dev->bus->self && dev->bus->self->device == 0x700f) |
77 | return 5; | 77 | return 5; |
78 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); | 78 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); |
79 | return irq; | 79 | return irq; |
80 | } | 80 | } |
81 | 81 | ||
/*
 * Restart or power off the machine by programming the ALI southbridge
 * directly.  @mode is one of the LINUX_REBOOT_CMD_* constants; modes
 * other than RESTART and POWER_OFF are ignored.
 */
void
nautilus_kill_arch(int mode)
{
	struct pci_bus *bus = pci_isa_hose->bus;	/* ISA-side PCI bus */
	u32 pmuport;	/* I/O base of the power-management unit */
	int off;	/* value to write to trigger the sleep state */

	switch (mode) {
	case LINUX_REBOOT_CMD_RESTART:
		/* With SRM, the console handles restart itself. */
		if (! alpha_using_srm) {
			u8 t8;
			/* NOTE(review): bit 7 of config reg 0x43 at devfn
			   0x38 appears to arm the port-0x92 reset path —
			   confirm against the M1543C datasheet. */
			pci_bus_read_config_byte(bus, 0x38, 0x43, &t8);
			pci_bus_write_config_byte(bus, 0x38, 0x43, t8 | 0x80);
			/* Pulse the fast-reset bit. */
			outb(1, 0x92);
			outb(0, 0x92);
			/* NOTREACHED */
		}
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		/* Assume M1543C */
		off = 0x2000;		/* SLP_TYPE = 0, SLP_EN = 1 */
		pci_bus_read_config_dword(bus, 0x88, 0x10, &pmuport);
		if (!pmuport) {
			/* M1535D/D+ */
			off = 0x3400;	/* SLP_TYPE = 5, SLP_EN = 1 */
			pci_bus_read_config_dword(bus, 0x88, 0xe0, &pmuport);
		}
		/* Mask off the low bit (I/O-space indicator in a BAR). */
		pmuport &= 0xfffe;
		outw(0xffff, pmuport);	/* Clear pending events. */
		outw(off, pmuport + 4);	/* Request the sleep state. */
		/* NOTREACHED */
		break;
	}
}
117 | 117 | ||
/* Perform analysis of a machine check that arrived from the system (NMI) */

static void
naut_sys_machine_check(unsigned long vector, unsigned long la_ptr,
		       struct pt_regs *regs)
{
	/* Log where the CPU was (PC and return address), then clear the
	   Irongate northbridge's PCI error state so it can latch the
	   next error. */
	printk("PC %lx RA %lx\n", regs->pc, regs->r26);
	irongate_pci_clr_err();
}
127 | 127 | ||
128 | /* Machine checks can come from two sources - those on the CPU and those | 128 | /* Machine checks can come from two sources - those on the CPU and those |
129 | in the system. They are analysed separately but all starts here. */ | 129 | in the system. They are analysed separately but all starts here. */ |
130 | 130 | ||
131 | void | 131 | void |
132 | nautilus_machine_check(unsigned long vector, unsigned long la_ptr) | 132 | nautilus_machine_check(unsigned long vector, unsigned long la_ptr) |
133 | { | 133 | { |
134 | char *mchk_class; | 134 | char *mchk_class; |
135 | 135 | ||
136 | /* Now for some analysis. Machine checks fall into two classes -- | 136 | /* Now for some analysis. Machine checks fall into two classes -- |
137 | those picked up by the system, and those picked up by the CPU. | 137 | those picked up by the system, and those picked up by the CPU. |
138 | Add to that the two levels of severity - correctable or not. */ | 138 | Add to that the two levels of severity - correctable or not. */ |
139 | 139 | ||
140 | if (vector == SCB_Q_SYSMCHK | 140 | if (vector == SCB_Q_SYSMCHK |
141 | && ((IRONGATE0->dramms & 0x300) == 0x300)) { | 141 | && ((IRONGATE0->dramms & 0x300) == 0x300)) { |
142 | unsigned long nmi_ctl; | 142 | unsigned long nmi_ctl; |
143 | 143 | ||
144 | /* Clear ALI NMI */ | 144 | /* Clear ALI NMI */ |
145 | nmi_ctl = inb(0x61); | 145 | nmi_ctl = inb(0x61); |
146 | nmi_ctl |= 0x0c; | 146 | nmi_ctl |= 0x0c; |
147 | outb(nmi_ctl, 0x61); | 147 | outb(nmi_ctl, 0x61); |
148 | nmi_ctl &= ~0x0c; | 148 | nmi_ctl &= ~0x0c; |
149 | outb(nmi_ctl, 0x61); | 149 | outb(nmi_ctl, 0x61); |
150 | 150 | ||
151 | /* Write again clears error bits. */ | 151 | /* Write again clears error bits. */ |
152 | IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100; | 152 | IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100; |
153 | mb(); | 153 | mb(); |
154 | IRONGATE0->stat_cmd; | 154 | IRONGATE0->stat_cmd; |
155 | 155 | ||
156 | /* Write again clears error bits. */ | 156 | /* Write again clears error bits. */ |
157 | IRONGATE0->dramms = IRONGATE0->dramms; | 157 | IRONGATE0->dramms = IRONGATE0->dramms; |
158 | mb(); | 158 | mb(); |
159 | IRONGATE0->dramms; | 159 | IRONGATE0->dramms; |
160 | 160 | ||
161 | draina(); | 161 | draina(); |
162 | wrmces(0x7); | 162 | wrmces(0x7); |
163 | mb(); | 163 | mb(); |
164 | return; | 164 | return; |
165 | } | 165 | } |
166 | 166 | ||
167 | if (vector == SCB_Q_SYSERR) | 167 | if (vector == SCB_Q_SYSERR) |
168 | mchk_class = "Correctable"; | 168 | mchk_class = "Correctable"; |
169 | else if (vector == SCB_Q_SYSMCHK) | 169 | else if (vector == SCB_Q_SYSMCHK) |
170 | mchk_class = "Fatal"; | 170 | mchk_class = "Fatal"; |
171 | else { | 171 | else { |
172 | ev6_machine_check(vector, la_ptr); | 172 | ev6_machine_check(vector, la_ptr); |
173 | return; | 173 | return; |
174 | } | 174 | } |
175 | 175 | ||
176 | printk(KERN_CRIT "NAUTILUS Machine check 0x%lx " | 176 | printk(KERN_CRIT "NAUTILUS Machine check 0x%lx " |
177 | "[%s System Machine Check (NMI)]\n", | 177 | "[%s System Machine Check (NMI)]\n", |
178 | vector, mchk_class); | 178 | vector, mchk_class); |
179 | 179 | ||
180 | naut_sys_machine_check(vector, la_ptr, get_irq_regs()); | 180 | naut_sys_machine_check(vector, la_ptr, get_irq_regs()); |
181 | 181 | ||
182 | /* Tell the PALcode to clear the machine check */ | 182 | /* Tell the PALcode to clear the machine check */ |
183 | draina(); | 183 | draina(); |
184 | wrmces(0x7); | 184 | wrmces(0x7); |
185 | mb(); | 185 | mb(); |
186 | } | 186 | } |
187 | 187 | ||
188 | extern void free_reserved_mem(void *, void *); | ||
189 | extern void pcibios_claim_one_bus(struct pci_bus *); | 188 | extern void pcibios_claim_one_bus(struct pci_bus *); |
190 | 189 | ||
/* Root-bus resources for the single Irongate hose; the actual address
   ranges are filled in by nautilus_init_pci(). */
static struct resource irongate_io = {
	.name	= "Irongate PCI IO",
	.flags	= IORESOURCE_IO,
};
static struct resource irongate_mem = {
	.name	= "Irongate PCI MEM",
	.flags	= IORESOURCE_MEM,
};
199 | 198 | ||
/*
 * Scan and configure the single Nautilus PCI hose.  Carves the PCI
 * memory window out of the top of the 4GB space, and any RAM that
 * would shadow that window (above min_mem_address) is handed back to
 * the page allocator via free_reserved_area().
 */
void __init
nautilus_init_pci(void)
{
	struct pci_controller *hose = hose_head;
	struct pci_bus *bus;
	struct pci_dev *irongate;
	unsigned long bus_align, bus_size, pci_mem;
	unsigned long memtop = max_low_pfn << PAGE_SHIFT;

	/* Scan our single hose. */
	bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
	hose->bus = bus;
	pcibios_claim_one_bus(bus);

	/* Device 0:0 is the Irongate northbridge itself; temporarily
	   install it as bus->self so bridge sizing works. */
	irongate = pci_get_bus_and_slot(0, 0);
	bus->self = irongate;
	bus->resource[0] = &irongate_io;
	bus->resource[1] = &irongate_mem;

	pci_bus_size_bridges(bus);

	/* IO port range. */
	bus->resource[0]->start = 0;
	bus->resource[0]->end = 0xffff;

	/* Set up PCI memory range - limit is hardwired to 0xffffffff,
	   base must be at aligned to 16Mb. */
	bus_align = bus->resource[1]->start;
	bus_size = bus->resource[1]->end + 1 - bus_align;
	if (bus_align < 0x1000000UL)
		bus_align = 0x1000000UL;

	/* Highest aligned base that leaves bus_size below 4GB. */
	pci_mem = (0x100000000UL - bus_size) & -bus_align;

	bus->resource[1]->start = pci_mem;
	bus->resource[1]->end = 0xffffffffUL;
	if (request_resource(&iomem_resource, bus->resource[1]) < 0)
		printk(KERN_ERR "Failed to request MEM on hose 0\n");

	/* RAM overlapping the PCI window (but above the machine-vector
	   minimum) is reserved; release it to the buddy allocator. */
	if (pci_mem < memtop)
		memtop = pci_mem;
	if (memtop > alpha_mv.min_mem_address) {
		free_reserved_area((unsigned long)__va(alpha_mv.min_mem_address),
				   (unsigned long)__va(memtop), 0, NULL);
		printk("nautilus_init_pci: %ldk freed\n",
			(memtop - alpha_mv.min_mem_address) >> 10);
	}

	if ((IRONGATE0->dev_vendor >> 16) > 0x7006)	/* Albacore? */
		IRONGATE0->pci_mem = pci_mem;

	pci_bus_assign_resources(bus);

	/* pci_common_swizzle() relies on bus->self being NULL
	   for the root bus, so just clear it. */
	bus->self = NULL;
	pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
}
258 | 257 | ||
/*
 * The System Vectors
 */

/* Machine vector for NAUTILUS-class boxes: EV6 MMU, Irongate I/O,
   ISA-style device interrupts, and the hooks defined above. */
struct alpha_machine_vector nautilus_mv __initmv = {
	.vector_name		= "Nautilus",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_IRONGATE_IO,
	.machine_check		= nautilus_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= IRONGATE_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= isa_device_interrupt,

	.init_arch		= irongate_init_arch,
	.init_irq		= nautilus_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= nautilus_init_pci,
	.kill_arch		= nautilus_kill_arch,
	.pci_map_irq		= nautilus_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(nautilus)
285 | 284 |
arch/alpha/mm/init.c
1 | /* | 1 | /* |
2 | * linux/arch/alpha/mm/init.c | 2 | * linux/arch/alpha/mm/init.c |
3 | * | 3 | * |
4 | * Copyright (C) 1995 Linus Torvalds | 4 | * Copyright (C) 1995 Linus Torvalds |
5 | */ | 5 | */ |
6 | 6 | ||
7 | /* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */ | 7 | /* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */ |
8 | 8 | ||
9 | #include <linux/pagemap.h> | 9 | #include <linux/pagemap.h> |
10 | #include <linux/signal.h> | 10 | #include <linux/signal.h> |
11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/ptrace.h> | 16 | #include <linux/ptrace.h> |
17 | #include <linux/mman.h> | 17 | #include <linux/mman.h> |
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/swap.h> | 19 | #include <linux/swap.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/bootmem.h> /* max_low_pfn */ | 21 | #include <linux/bootmem.h> /* max_low_pfn */ |
22 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
23 | #include <linux/gfp.h> | 23 | #include <linux/gfp.h> |
24 | 24 | ||
25 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
26 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
27 | #include <asm/pgalloc.h> | 27 | #include <asm/pgalloc.h> |
28 | #include <asm/hwrpb.h> | 28 | #include <asm/hwrpb.h> |
29 | #include <asm/dma.h> | 29 | #include <asm/dma.h> |
30 | #include <asm/mmu_context.h> | 30 | #include <asm/mmu_context.h> |
31 | #include <asm/console.h> | 31 | #include <asm/console.h> |
32 | #include <asm/tlb.h> | 32 | #include <asm/tlb.h> |
33 | #include <asm/setup.h> | 33 | #include <asm/setup.h> |
34 | #include <asm/sections.h> | ||
34 | 35 | ||
35 | extern void die_if_kernel(char *,struct pt_regs *,long); | 36 | extern void die_if_kernel(char *,struct pt_regs *,long); |
36 | 37 | ||
37 | static struct pcb_struct original_pcb; | 38 | static struct pcb_struct original_pcb; |
38 | 39 | ||
/*
 * Allocate a zeroed page directory for @mm.  Kernel-space entries are
 * copied from init_mm's pgd so kernel mappings are visible, and the
 * final slot is pointed back at this pgd page (the VPTB self-map).
 * Returns NULL if the page allocation fails.
 */
pgd_t *
pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	init = pgd_offset(&init_mm, 0UL);
	if (ret) {
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
		/* Copy every kernel entry except the final self-map slot. */
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
#else
		/* Only one kernel PGD slot is in use; copy just that. */
		pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
#endif

		/* The last PGD entry is the VPTB self-map. */
		pgd_val(ret[PTRS_PER_PGD-1])
			= pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
	}
	return ret;
}
60 | 61 | ||
61 | 62 | ||
62 | /* | 63 | /* |
63 | * BAD_PAGE is the page that is used for page faults when linux | 64 | * BAD_PAGE is the page that is used for page faults when linux |
64 | * is out-of-memory. Older versions of linux just did a | 65 | * is out-of-memory. Older versions of linux just did a |
65 | * do_exit(), but using this instead means there is less risk | 66 | * do_exit(), but using this instead means there is less risk |
66 | * for a process dying in kernel mode, possibly leaving an inode | 67 | * for a process dying in kernel mode, possibly leaving an inode |
67 | * unused etc.. | 68 | * unused etc.. |
68 | * | 69 | * |
69 | * BAD_PAGETABLE is the accompanying page-table: it is initialized | 70 | * BAD_PAGETABLE is the accompanying page-table: it is initialized |
70 | * to point to BAD_PAGE entries. | 71 | * to point to BAD_PAGE entries. |
71 | * | 72 | * |
72 | * ZERO_PAGE is a special page that is used for zero-initialized | 73 | * ZERO_PAGE is a special page that is used for zero-initialized |
73 | * data and COW. | 74 | * data and COW. |
74 | */ | 75 | */ |
75 | pmd_t * | 76 | pmd_t * |
76 | __bad_pagetable(void) | 77 | __bad_pagetable(void) |
77 | { | 78 | { |
78 | memset((void *) EMPTY_PGT, 0, PAGE_SIZE); | 79 | memset((void *) EMPTY_PGT, 0, PAGE_SIZE); |
79 | return (pmd_t *) EMPTY_PGT; | 80 | return (pmd_t *) EMPTY_PGT; |
80 | } | 81 | } |
81 | 82 | ||
82 | pte_t | 83 | pte_t |
83 | __bad_page(void) | 84 | __bad_page(void) |
84 | { | 85 | { |
85 | memset((void *) EMPTY_PGE, 0, PAGE_SIZE); | 86 | memset((void *) EMPTY_PGE, 0, PAGE_SIZE); |
86 | return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED)); | 87 | return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED)); |
87 | } | 88 | } |
88 | 89 | ||
/* Record the current stack pointer ($30) in @pcb->ksp, then make @pcb
   the active hardware PCB via __reload_thread().  The caller treats the
   return value as the address of the previously active PCB. */
static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
	return __reload_thread(pcb);
}
96 | 97 | ||
/* Set up initial PCB, VPTB, and other such nicities.  */

static inline void
switch_to_system_map(void)
{
	unsigned long newptbr;
	unsigned long original_pcb_ptr;

	/* Initialize the kernel's page tables.  Linux puts the vptb in
	   the last slot of the L1 page table. */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	/* Page-frame number of swapper_pg_dir, used as the new PTBR. */
	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
	pgd_val(swapper_pg_dir[1023]) =
		(newptbr << 32) | pgprot_val(PAGE_KERNEL);

	/* Set the vptb.  This is often done by the bootloader, but
	   shouldn't be required. */
	if (hwrpb->vptb != 0xfffffffe00000000UL) {
		wrvptptr(0xfffffffe00000000UL);
		hwrpb->vptb = 0xfffffffe00000000UL;
		hwrpb_update_checksum(hwrpb);
	}

	/* Also set up the real kernel PCB while we're at it. */
	init_thread_info.pcb.ptbr = newptbr;
	init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
	tbia();		/* flush the whole TLB after the PTBR switch */

	/* Save off the contents of the original PCB so that we can
	   restore the original console's page tables for a clean reboot.

	   Note that the PCB is supposed to be a physical address, but
	   since KSEG values also happen to work, folks get confused.
	   Check this here. */

	if (original_pcb_ptr < PAGE_OFFSET) {
		original_pcb_ptr = (unsigned long)
			phys_to_virt(original_pcb_ptr);
	}
	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}
139 | 140 | ||
/* Set once the console callbacks have been remapped and are usable. */
int callback_init_done;

/*
 * Switch to the kernel's own page tables and, when running under SRM,
 * remap the console callback routines (CRB) into the vmalloc region so
 * they remain callable afterwards.
 *
 * @kernel_end: first free address past the kernel image.  Returns the
 * possibly-advanced end address after stealing pages for page tables.
 */
void * __init
callback_init(void * kernel_end)
{
	struct crb_struct * crb;
	pgd_t *pgd;
	pmd_t *pmd;
	void *two_pages;

	/* Starting at the HWRPB, locate the CRB. */
	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

	if (alpha_using_srm) {
		/* Tell the console whither it is to be remapped. */
		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
			__halt();		/* "We're boned."  --Bender */

		/* Edit the procedure descriptors for DISPATCH and FIXUP. */
		crb->dispatch_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->dispatch_va
			 - crb->map[0].va);
		crb->fixup_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->fixup_va
			 - crb->map[0].va);
	}

	switch_to_system_map();

	/* Allocate one PGD and one PMD.  In the case of SRM, we'll need
	   these to actually remap the console.  There is an assumption
	   here that only one of each is needed, and this allows for 8MB.
	   On systems with larger consoles, additional pages will be
	   allocated as needed during the mapping process.

	   In the case of not SRM, but not CONFIG_ALPHA_LARGE_VMALLOC,
	   we need to allocate the PGD we use for vmalloc before we start
	   forking other tasks.  */

	/* Round kernel_end up to a page boundary and steal two pages. */
	two_pages = (void *)
		(((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
	kernel_end = two_pages + 2*PAGE_SIZE;
	memset(two_pages, 0, 2*PAGE_SIZE);

	pgd = pgd_offset_k(VMALLOC_START);
	pgd_set(pgd, (pmd_t *)two_pages);
	pmd = pmd_offset(pgd, VMALLOC_START);
	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long nr_pages = 0;
		unsigned long vaddr;
		unsigned long i, j;

		/* calculate needed size */
		for (i = 0; i < crb->map_entries; ++i)
			nr_pages += crb->map[i].count;

		/* register the vm area */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.size = nr_pages << PAGE_SHIFT;
		vm_area_register_early(&console_remap_vm, PAGE_SIZE);

		vaddr = (unsigned long)console_remap_vm.addr;

		/* Set up the third level PTEs and update the virtual
		   addresses of the CRB entries. */
		for (i = 0; i < crb->map_entries; ++i) {
			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
			crb->map[i].va = vaddr;
			for (j = 0; j < crb->map[i].count; ++j) {
				/* Newer consoles (especially on larger
				   systems) may require more pages of
				   PTEs.  Grab additional pages as needed. */
				if (pmd != pmd_offset(pgd, vaddr)) {
					memset(kernel_end, 0, PAGE_SIZE);
					pmd = pmd_offset(pgd, vaddr);
					pmd_set(pmd, (pte_t *)kernel_end);
					kernel_end += PAGE_SIZE;
				}
				set_pte(pte_offset_kernel(pmd, vaddr),
					pfn_pte(pfn, PAGE_KERNEL));
				pfn++;
				vaddr += PAGE_SIZE;
			}
		}
	}

	callback_init_done = 1;
	return kernel_end;
}
232 | 233 | ||
233 | 234 | ||
234 | #ifndef CONFIG_DISCONTIGMEM | 235 | #ifndef CONFIG_DISCONTIGMEM |
235 | /* | 236 | /* |
236 | * paging_init() sets up the memory map. | 237 | * paging_init() sets up the memory map. |
237 | */ | 238 | */ |
238 | void __init paging_init(void) | 239 | void __init paging_init(void) |
239 | { | 240 | { |
240 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; | 241 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; |
241 | unsigned long dma_pfn, high_pfn; | 242 | unsigned long dma_pfn, high_pfn; |
242 | 243 | ||
243 | dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | 244 | dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; |
244 | high_pfn = max_pfn = max_low_pfn; | 245 | high_pfn = max_pfn = max_low_pfn; |
245 | 246 | ||
246 | if (dma_pfn >= high_pfn) | 247 | if (dma_pfn >= high_pfn) |
247 | zones_size[ZONE_DMA] = high_pfn; | 248 | zones_size[ZONE_DMA] = high_pfn; |
248 | else { | 249 | else { |
249 | zones_size[ZONE_DMA] = dma_pfn; | 250 | zones_size[ZONE_DMA] = dma_pfn; |
250 | zones_size[ZONE_NORMAL] = high_pfn - dma_pfn; | 251 | zones_size[ZONE_NORMAL] = high_pfn - dma_pfn; |
251 | } | 252 | } |
252 | 253 | ||
253 | /* Initialize mem_map[]. */ | 254 | /* Initialize mem_map[]. */ |
254 | free_area_init(zones_size); | 255 | free_area_init(zones_size); |
255 | 256 | ||
256 | /* Initialize the kernel's ZERO_PGE. */ | 257 | /* Initialize the kernel's ZERO_PGE. */ |
257 | memset((void *)ZERO_PGE, 0, PAGE_SIZE); | 258 | memset((void *)ZERO_PGE, 0, PAGE_SIZE); |
258 | } | 259 | } |
259 | #endif /* CONFIG_DISCONTIGMEM */ | 260 | #endif /* CONFIG_DISCONTIGMEM */ |
260 | 261 | ||
261 | #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM) | 262 | #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM) |
262 | void | 263 | void |
263 | srm_paging_stop (void) | 264 | srm_paging_stop (void) |
264 | { | 265 | { |
265 | /* Move the vptb back to where the SRM console expects it. */ | 266 | /* Move the vptb back to where the SRM console expects it. */ |
266 | swapper_pg_dir[1] = swapper_pg_dir[1023]; | 267 | swapper_pg_dir[1] = swapper_pg_dir[1023]; |
267 | tbia(); | 268 | tbia(); |
268 | wrvptptr(0x200000000UL); | 269 | wrvptptr(0x200000000UL); |
269 | hwrpb->vptb = 0x200000000UL; | 270 | hwrpb->vptb = 0x200000000UL; |
270 | hwrpb_update_checksum(hwrpb); | 271 | hwrpb_update_checksum(hwrpb); |
271 | 272 | ||
272 | /* Reload the page tables that the console had in use. */ | 273 | /* Reload the page tables that the console had in use. */ |
273 | load_PCB(&original_pcb); | 274 | load_PCB(&original_pcb); |
274 | tbia(); | 275 | tbia(); |
275 | } | 276 | } |
276 | #endif | 277 | #endif |
277 | 278 | ||
278 | #ifndef CONFIG_DISCONTIGMEM | 279 | #ifndef CONFIG_DISCONTIGMEM |
279 | static void __init | 280 | static void __init |
280 | printk_memory_info(void) | 281 | printk_memory_info(void) |
281 | { | 282 | { |
282 | unsigned long codesize, reservedpages, datasize, initsize, tmp; | 283 | unsigned long codesize, reservedpages, datasize, initsize, tmp; |
283 | extern int page_is_ram(unsigned long) __init; | 284 | extern int page_is_ram(unsigned long) __init; |
284 | extern char _text, _etext, _data, _edata; | ||
285 | extern char __init_begin, __init_end; | ||
286 | 285 | ||
287 | /* printk all informations */ | 286 | /* printk all informations */ |
288 | reservedpages = 0; | 287 | reservedpages = 0; |
289 | for (tmp = 0; tmp < max_low_pfn; tmp++) | 288 | for (tmp = 0; tmp < max_low_pfn; tmp++) |
290 | /* | 289 | /* |
291 | * Only count reserved RAM pages | 290 | * Only count reserved RAM pages |
292 | */ | 291 | */ |
293 | if (page_is_ram(tmp) && PageReserved(mem_map+tmp)) | 292 | if (page_is_ram(tmp) && PageReserved(mem_map+tmp)) |
294 | reservedpages++; | 293 | reservedpages++; |
295 | 294 | ||
296 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 295 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
297 | datasize = (unsigned long) &_edata - (unsigned long) &_data; | 296 | datasize = (unsigned long) &_edata - (unsigned long) &_data; |
298 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | 297 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; |
299 | 298 | ||
300 | printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n", | 299 | printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n", |
301 | nr_free_pages() << (PAGE_SHIFT-10), | 300 | nr_free_pages() << (PAGE_SHIFT-10), |
302 | max_mapnr << (PAGE_SHIFT-10), | 301 | max_mapnr << (PAGE_SHIFT-10), |
303 | codesize >> 10, | 302 | codesize >> 10, |
304 | reservedpages << (PAGE_SHIFT-10), | 303 | reservedpages << (PAGE_SHIFT-10), |
305 | datasize >> 10, | 304 | datasize >> 10, |
306 | initsize >> 10); | 305 | initsize >> 10); |
307 | } | 306 | } |
308 | 307 | ||
309 | void __init | 308 | void __init |
310 | mem_init(void) | 309 | mem_init(void) |
311 | { | 310 | { |
312 | max_mapnr = num_physpages = max_low_pfn; | 311 | max_mapnr = num_physpages = max_low_pfn; |
313 | totalram_pages += free_all_bootmem(); | 312 | totalram_pages += free_all_bootmem(); |
314 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | 313 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); |
315 | 314 | ||
316 | printk_memory_info(); | 315 | printk_memory_info(); |
317 | } | 316 | } |
318 | #endif /* CONFIG_DISCONTIGMEM */ | 317 | #endif /* CONFIG_DISCONTIGMEM */ |
319 | 318 | ||
320 | void | 319 | void |
321 | free_reserved_mem(void *start, void *end) | ||
322 | { | ||
323 | void *__start = start; | ||
324 | for (; __start < end; __start += PAGE_SIZE) { | ||
325 | ClearPageReserved(virt_to_page(__start)); | ||
326 | init_page_count(virt_to_page(__start)); | ||
327 | free_page((long)__start); | ||
328 | totalram_pages++; | ||
329 | } | ||
330 | } | ||
331 | |||
332 | void | ||
333 | free_initmem(void) | 320 | free_initmem(void) |
334 | { | 321 | { |
335 | extern char __init_begin, __init_end; | 322 | free_initmem_default(0); |
336 | |||
337 | free_reserved_mem(&__init_begin, &__init_end); | ||
338 | printk ("Freeing unused kernel memory: %ldk freed\n", | ||
339 | (&__init_end - &__init_begin) >> 10); | ||
340 | } | 323 | } |
341 | 324 | ||
342 | #ifdef CONFIG_BLK_DEV_INITRD | 325 | #ifdef CONFIG_BLK_DEV_INITRD |
343 | void | 326 | void |
344 | free_initrd_mem(unsigned long start, unsigned long end) | 327 | free_initrd_mem(unsigned long start, unsigned long end) |
345 | { | 328 | { |
346 | free_reserved_mem((void *)start, (void *)end); | 329 | free_reserved_area(start, end, 0, "initrd"); |
347 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | ||
348 | } | 330 | } |
349 | #endif | 331 | #endif |
arch/alpha/mm/numa.c
1 | /* | 1 | /* |
2 | * linux/arch/alpha/mm/numa.c | 2 | * linux/arch/alpha/mm/numa.c |
3 | * | 3 | * |
4 | * DISCONTIGMEM NUMA alpha support. | 4 | * DISCONTIGMEM NUMA alpha support. |
5 | * | 5 | * |
6 | * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE | 6 | * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <linux/bootmem.h> | 12 | #include <linux/bootmem.h> |
13 | #include <linux/swap.h> | 13 | #include <linux/swap.h> |
14 | #include <linux/initrd.h> | 14 | #include <linux/initrd.h> |
15 | #include <linux/pfn.h> | 15 | #include <linux/pfn.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | 17 | ||
18 | #include <asm/hwrpb.h> | 18 | #include <asm/hwrpb.h> |
19 | #include <asm/pgalloc.h> | 19 | #include <asm/pgalloc.h> |
20 | #include <asm/sections.h> | ||
20 | 21 | ||
21 | pg_data_t node_data[MAX_NUMNODES]; | 22 | pg_data_t node_data[MAX_NUMNODES]; |
22 | EXPORT_SYMBOL(node_data); | 23 | EXPORT_SYMBOL(node_data); |
23 | 24 | ||
24 | #undef DEBUG_DISCONTIG | 25 | #undef DEBUG_DISCONTIG |
25 | #ifdef DEBUG_DISCONTIG | 26 | #ifdef DEBUG_DISCONTIG |
26 | #define DBGDCONT(args...) printk(args) | 27 | #define DBGDCONT(args...) printk(args) |
27 | #else | 28 | #else |
28 | #define DBGDCONT(args...) | 29 | #define DBGDCONT(args...) |
29 | #endif | 30 | #endif |
30 | 31 | ||
31 | #define for_each_mem_cluster(memdesc, _cluster, i) \ | 32 | #define for_each_mem_cluster(memdesc, _cluster, i) \ |
32 | for ((_cluster) = (memdesc)->cluster, (i) = 0; \ | 33 | for ((_cluster) = (memdesc)->cluster, (i) = 0; \ |
33 | (i) < (memdesc)->numclusters; (i)++, (_cluster)++) | 34 | (i) < (memdesc)->numclusters; (i)++, (_cluster)++) |
34 | 35 | ||
35 | static void __init show_mem_layout(void) | 36 | static void __init show_mem_layout(void) |
36 | { | 37 | { |
37 | struct memclust_struct * cluster; | 38 | struct memclust_struct * cluster; |
38 | struct memdesc_struct * memdesc; | 39 | struct memdesc_struct * memdesc; |
39 | int i; | 40 | int i; |
40 | 41 | ||
41 | /* Find free clusters, and init and free the bootmem accordingly. */ | 42 | /* Find free clusters, and init and free the bootmem accordingly. */ |
42 | memdesc = (struct memdesc_struct *) | 43 | memdesc = (struct memdesc_struct *) |
43 | (hwrpb->mddt_offset + (unsigned long) hwrpb); | 44 | (hwrpb->mddt_offset + (unsigned long) hwrpb); |
44 | 45 | ||
45 | printk("Raw memory layout:\n"); | 46 | printk("Raw memory layout:\n"); |
46 | for_each_mem_cluster(memdesc, cluster, i) { | 47 | for_each_mem_cluster(memdesc, cluster, i) { |
47 | printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n", | 48 | printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n", |
48 | i, cluster->usage, cluster->start_pfn, | 49 | i, cluster->usage, cluster->start_pfn, |
49 | cluster->start_pfn + cluster->numpages); | 50 | cluster->start_pfn + cluster->numpages); |
50 | } | 51 | } |
51 | } | 52 | } |
52 | 53 | ||
53 | static void __init | 54 | static void __init |
54 | setup_memory_node(int nid, void *kernel_end) | 55 | setup_memory_node(int nid, void *kernel_end) |
55 | { | 56 | { |
56 | extern unsigned long mem_size_limit; | 57 | extern unsigned long mem_size_limit; |
57 | struct memclust_struct * cluster; | 58 | struct memclust_struct * cluster; |
58 | struct memdesc_struct * memdesc; | 59 | struct memdesc_struct * memdesc; |
59 | unsigned long start_kernel_pfn, end_kernel_pfn; | 60 | unsigned long start_kernel_pfn, end_kernel_pfn; |
60 | unsigned long bootmap_size, bootmap_pages, bootmap_start; | 61 | unsigned long bootmap_size, bootmap_pages, bootmap_start; |
61 | unsigned long start, end; | 62 | unsigned long start, end; |
62 | unsigned long node_pfn_start, node_pfn_end; | 63 | unsigned long node_pfn_start, node_pfn_end; |
63 | unsigned long node_min_pfn, node_max_pfn; | 64 | unsigned long node_min_pfn, node_max_pfn; |
64 | int i; | 65 | int i; |
65 | unsigned long node_datasz = PFN_UP(sizeof(pg_data_t)); | 66 | unsigned long node_datasz = PFN_UP(sizeof(pg_data_t)); |
66 | int show_init = 0; | 67 | int show_init = 0; |
67 | 68 | ||
68 | /* Find the bounds of current node */ | 69 | /* Find the bounds of current node */ |
69 | node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT; | 70 | node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT; |
70 | node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT); | 71 | node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT); |
71 | 72 | ||
72 | /* Find free clusters, and init and free the bootmem accordingly. */ | 73 | /* Find free clusters, and init and free the bootmem accordingly. */ |
73 | memdesc = (struct memdesc_struct *) | 74 | memdesc = (struct memdesc_struct *) |
74 | (hwrpb->mddt_offset + (unsigned long) hwrpb); | 75 | (hwrpb->mddt_offset + (unsigned long) hwrpb); |
75 | 76 | ||
76 | /* find the bounds of this node (node_min_pfn/node_max_pfn) */ | 77 | /* find the bounds of this node (node_min_pfn/node_max_pfn) */ |
77 | node_min_pfn = ~0UL; | 78 | node_min_pfn = ~0UL; |
78 | node_max_pfn = 0UL; | 79 | node_max_pfn = 0UL; |
79 | for_each_mem_cluster(memdesc, cluster, i) { | 80 | for_each_mem_cluster(memdesc, cluster, i) { |
80 | /* Bit 0 is console/PALcode reserved. Bit 1 is | 81 | /* Bit 0 is console/PALcode reserved. Bit 1 is |
81 | non-volatile memory -- we might want to mark | 82 | non-volatile memory -- we might want to mark |
82 | this for later. */ | 83 | this for later. */ |
83 | if (cluster->usage & 3) | 84 | if (cluster->usage & 3) |
84 | continue; | 85 | continue; |
85 | 86 | ||
86 | start = cluster->start_pfn; | 87 | start = cluster->start_pfn; |
87 | end = start + cluster->numpages; | 88 | end = start + cluster->numpages; |
88 | 89 | ||
89 | if (start >= node_pfn_end || end <= node_pfn_start) | 90 | if (start >= node_pfn_end || end <= node_pfn_start) |
90 | continue; | 91 | continue; |
91 | 92 | ||
92 | if (!show_init) { | 93 | if (!show_init) { |
93 | show_init = 1; | 94 | show_init = 1; |
94 | printk("Initializing bootmem allocator on Node ID %d\n", nid); | 95 | printk("Initializing bootmem allocator on Node ID %d\n", nid); |
95 | } | 96 | } |
96 | printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n", | 97 | printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n", |
97 | i, cluster->usage, cluster->start_pfn, | 98 | i, cluster->usage, cluster->start_pfn, |
98 | cluster->start_pfn + cluster->numpages); | 99 | cluster->start_pfn + cluster->numpages); |
99 | 100 | ||
100 | if (start < node_pfn_start) | 101 | if (start < node_pfn_start) |
101 | start = node_pfn_start; | 102 | start = node_pfn_start; |
102 | if (end > node_pfn_end) | 103 | if (end > node_pfn_end) |
103 | end = node_pfn_end; | 104 | end = node_pfn_end; |
104 | 105 | ||
105 | if (start < node_min_pfn) | 106 | if (start < node_min_pfn) |
106 | node_min_pfn = start; | 107 | node_min_pfn = start; |
107 | if (end > node_max_pfn) | 108 | if (end > node_max_pfn) |
108 | node_max_pfn = end; | 109 | node_max_pfn = end; |
109 | } | 110 | } |
110 | 111 | ||
111 | if (mem_size_limit && node_max_pfn > mem_size_limit) { | 112 | if (mem_size_limit && node_max_pfn > mem_size_limit) { |
112 | static int msg_shown = 0; | 113 | static int msg_shown = 0; |
113 | if (!msg_shown) { | 114 | if (!msg_shown) { |
114 | msg_shown = 1; | 115 | msg_shown = 1; |
115 | printk("setup: forcing memory size to %ldK (from %ldK).\n", | 116 | printk("setup: forcing memory size to %ldK (from %ldK).\n", |
116 | mem_size_limit << (PAGE_SHIFT - 10), | 117 | mem_size_limit << (PAGE_SHIFT - 10), |
117 | node_max_pfn << (PAGE_SHIFT - 10)); | 118 | node_max_pfn << (PAGE_SHIFT - 10)); |
118 | } | 119 | } |
119 | node_max_pfn = mem_size_limit; | 120 | node_max_pfn = mem_size_limit; |
120 | } | 121 | } |
121 | 122 | ||
122 | if (node_min_pfn >= node_max_pfn) | 123 | if (node_min_pfn >= node_max_pfn) |
123 | return; | 124 | return; |
124 | 125 | ||
125 | /* Update global {min,max}_low_pfn from node information. */ | 126 | /* Update global {min,max}_low_pfn from node information. */ |
126 | if (node_min_pfn < min_low_pfn) | 127 | if (node_min_pfn < min_low_pfn) |
127 | min_low_pfn = node_min_pfn; | 128 | min_low_pfn = node_min_pfn; |
128 | if (node_max_pfn > max_low_pfn) | 129 | if (node_max_pfn > max_low_pfn) |
129 | max_pfn = max_low_pfn = node_max_pfn; | 130 | max_pfn = max_low_pfn = node_max_pfn; |
130 | 131 | ||
131 | num_physpages += node_max_pfn - node_min_pfn; | 132 | num_physpages += node_max_pfn - node_min_pfn; |
132 | 133 | ||
133 | #if 0 /* we'll try this one again in a little while */ | 134 | #if 0 /* we'll try this one again in a little while */ |
134 | /* Cute trick to make sure our local node data is on local memory */ | 135 | /* Cute trick to make sure our local node data is on local memory */ |
135 | node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT)); | 136 | node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT)); |
136 | #endif | 137 | #endif |
137 | /* Quasi-mark the pg_data_t as in-use */ | 138 | /* Quasi-mark the pg_data_t as in-use */ |
138 | node_min_pfn += node_datasz; | 139 | node_min_pfn += node_datasz; |
139 | if (node_min_pfn >= node_max_pfn) { | 140 | if (node_min_pfn >= node_max_pfn) { |
140 | printk(" not enough mem to reserve NODE_DATA"); | 141 | printk(" not enough mem to reserve NODE_DATA"); |
141 | return; | 142 | return; |
142 | } | 143 | } |
143 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; | 144 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; |
144 | 145 | ||
145 | printk(" Detected node memory: start %8lu, end %8lu\n", | 146 | printk(" Detected node memory: start %8lu, end %8lu\n", |
146 | node_min_pfn, node_max_pfn); | 147 | node_min_pfn, node_max_pfn); |
147 | 148 | ||
148 | DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid)); | 149 | DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid)); |
149 | DBGDCONT(" DISCONTIG: NODE_DATA(%d)->bdata is at 0x%p\n", nid, NODE_DATA(nid)->bdata); | 150 | DBGDCONT(" DISCONTIG: NODE_DATA(%d)->bdata is at 0x%p\n", nid, NODE_DATA(nid)->bdata); |
150 | 151 | ||
151 | /* Find the bounds of kernel memory. */ | 152 | /* Find the bounds of kernel memory. */ |
152 | start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS); | 153 | start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS); |
153 | end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end)); | 154 | end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end)); |
154 | bootmap_start = -1; | 155 | bootmap_start = -1; |
155 | 156 | ||
156 | if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn)) | 157 | if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn)) |
157 | panic("kernel loaded out of ram"); | 158 | panic("kernel loaded out of ram"); |
158 | 159 | ||
159 | /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned. | 160 | /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned. |
160 | Note that we round this down, not up - node memory | 161 | Note that we round this down, not up - node memory |
161 | has much larger alignment than 8Mb, so it's safe. */ | 162 | has much larger alignment than 8Mb, so it's safe. */ |
162 | node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1); | 163 | node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1); |
163 | 164 | ||
164 | /* We need to know how many physically contiguous pages | 165 | /* We need to know how many physically contiguous pages |
165 | we'll need for the bootmap. */ | 166 | we'll need for the bootmap. */ |
166 | bootmap_pages = bootmem_bootmap_pages(node_max_pfn-node_min_pfn); | 167 | bootmap_pages = bootmem_bootmap_pages(node_max_pfn-node_min_pfn); |
167 | 168 | ||
168 | /* Now find a good region where to allocate the bootmap. */ | 169 | /* Now find a good region where to allocate the bootmap. */ |
169 | for_each_mem_cluster(memdesc, cluster, i) { | 170 | for_each_mem_cluster(memdesc, cluster, i) { |
170 | if (cluster->usage & 3) | 171 | if (cluster->usage & 3) |
171 | continue; | 172 | continue; |
172 | 173 | ||
173 | start = cluster->start_pfn; | 174 | start = cluster->start_pfn; |
174 | end = start + cluster->numpages; | 175 | end = start + cluster->numpages; |
175 | 176 | ||
176 | if (start >= node_max_pfn || end <= node_min_pfn) | 177 | if (start >= node_max_pfn || end <= node_min_pfn) |
177 | continue; | 178 | continue; |
178 | 179 | ||
179 | if (end > node_max_pfn) | 180 | if (end > node_max_pfn) |
180 | end = node_max_pfn; | 181 | end = node_max_pfn; |
181 | if (start < node_min_pfn) | 182 | if (start < node_min_pfn) |
182 | start = node_min_pfn; | 183 | start = node_min_pfn; |
183 | 184 | ||
184 | if (start < start_kernel_pfn) { | 185 | if (start < start_kernel_pfn) { |
185 | if (end > end_kernel_pfn | 186 | if (end > end_kernel_pfn |
186 | && end - end_kernel_pfn >= bootmap_pages) { | 187 | && end - end_kernel_pfn >= bootmap_pages) { |
187 | bootmap_start = end_kernel_pfn; | 188 | bootmap_start = end_kernel_pfn; |
188 | break; | 189 | break; |
189 | } else if (end > start_kernel_pfn) | 190 | } else if (end > start_kernel_pfn) |
190 | end = start_kernel_pfn; | 191 | end = start_kernel_pfn; |
191 | } else if (start < end_kernel_pfn) | 192 | } else if (start < end_kernel_pfn) |
192 | start = end_kernel_pfn; | 193 | start = end_kernel_pfn; |
193 | if (end - start >= bootmap_pages) { | 194 | if (end - start >= bootmap_pages) { |
194 | bootmap_start = start; | 195 | bootmap_start = start; |
195 | break; | 196 | break; |
196 | } | 197 | } |
197 | } | 198 | } |
198 | 199 | ||
199 | if (bootmap_start == -1) | 200 | if (bootmap_start == -1) |
200 | panic("couldn't find a contiguous place for the bootmap"); | 201 | panic("couldn't find a contiguous place for the bootmap"); |
201 | 202 | ||
202 | /* Allocate the bootmap and mark the whole MM as reserved. */ | 203 | /* Allocate the bootmap and mark the whole MM as reserved. */ |
203 | bootmap_size = init_bootmem_node(NODE_DATA(nid), bootmap_start, | 204 | bootmap_size = init_bootmem_node(NODE_DATA(nid), bootmap_start, |
204 | node_min_pfn, node_max_pfn); | 205 | node_min_pfn, node_max_pfn); |
205 | DBGDCONT(" bootmap_start %lu, bootmap_size %lu, bootmap_pages %lu\n", | 206 | DBGDCONT(" bootmap_start %lu, bootmap_size %lu, bootmap_pages %lu\n", |
206 | bootmap_start, bootmap_size, bootmap_pages); | 207 | bootmap_start, bootmap_size, bootmap_pages); |
207 | 208 | ||
208 | /* Mark the free regions. */ | 209 | /* Mark the free regions. */ |
209 | for_each_mem_cluster(memdesc, cluster, i) { | 210 | for_each_mem_cluster(memdesc, cluster, i) { |
210 | if (cluster->usage & 3) | 211 | if (cluster->usage & 3) |
211 | continue; | 212 | continue; |
212 | 213 | ||
213 | start = cluster->start_pfn; | 214 | start = cluster->start_pfn; |
214 | end = cluster->start_pfn + cluster->numpages; | 215 | end = cluster->start_pfn + cluster->numpages; |
215 | 216 | ||
216 | if (start >= node_max_pfn || end <= node_min_pfn) | 217 | if (start >= node_max_pfn || end <= node_min_pfn) |
217 | continue; | 218 | continue; |
218 | 219 | ||
219 | if (end > node_max_pfn) | 220 | if (end > node_max_pfn) |
220 | end = node_max_pfn; | 221 | end = node_max_pfn; |
221 | if (start < node_min_pfn) | 222 | if (start < node_min_pfn) |
222 | start = node_min_pfn; | 223 | start = node_min_pfn; |
223 | 224 | ||
224 | if (start < start_kernel_pfn) { | 225 | if (start < start_kernel_pfn) { |
225 | if (end > end_kernel_pfn) { | 226 | if (end > end_kernel_pfn) { |
226 | free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), | 227 | free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), |
227 | (PFN_PHYS(start_kernel_pfn) | 228 | (PFN_PHYS(start_kernel_pfn) |
228 | - PFN_PHYS(start))); | 229 | - PFN_PHYS(start))); |
229 | printk(" freeing pages %ld:%ld\n", | 230 | printk(" freeing pages %ld:%ld\n", |
230 | start, start_kernel_pfn); | 231 | start, start_kernel_pfn); |
231 | start = end_kernel_pfn; | 232 | start = end_kernel_pfn; |
232 | } else if (end > start_kernel_pfn) | 233 | } else if (end > start_kernel_pfn) |
233 | end = start_kernel_pfn; | 234 | end = start_kernel_pfn; |
234 | } else if (start < end_kernel_pfn) | 235 | } else if (start < end_kernel_pfn) |
235 | start = end_kernel_pfn; | 236 | start = end_kernel_pfn; |
236 | if (start >= end) | 237 | if (start >= end) |
237 | continue; | 238 | continue; |
238 | 239 | ||
239 | free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start)); | 240 | free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start)); |
240 | printk(" freeing pages %ld:%ld\n", start, end); | 241 | printk(" freeing pages %ld:%ld\n", start, end); |
241 | } | 242 | } |
242 | 243 | ||
243 | /* Reserve the bootmap memory. */ | 244 | /* Reserve the bootmap memory. */ |
244 | reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start), | 245 | reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start), |
245 | bootmap_size, BOOTMEM_DEFAULT); | 246 | bootmap_size, BOOTMEM_DEFAULT); |
246 | printk(" reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size)); | 247 | printk(" reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size)); |
247 | 248 | ||
248 | node_set_online(nid); | 249 | node_set_online(nid); |
249 | } | 250 | } |
250 | 251 | ||
251 | void __init | 252 | void __init |
252 | setup_memory(void *kernel_end) | 253 | setup_memory(void *kernel_end) |
253 | { | 254 | { |
254 | int nid; | 255 | int nid; |
255 | 256 | ||
256 | show_mem_layout(); | 257 | show_mem_layout(); |
257 | 258 | ||
258 | nodes_clear(node_online_map); | 259 | nodes_clear(node_online_map); |
259 | 260 | ||
260 | min_low_pfn = ~0UL; | 261 | min_low_pfn = ~0UL; |
261 | max_low_pfn = 0UL; | 262 | max_low_pfn = 0UL; |
262 | for (nid = 0; nid < MAX_NUMNODES; nid++) | 263 | for (nid = 0; nid < MAX_NUMNODES; nid++) |
263 | setup_memory_node(nid, kernel_end); | 264 | setup_memory_node(nid, kernel_end); |
264 | 265 | ||
265 | #ifdef CONFIG_BLK_DEV_INITRD | 266 | #ifdef CONFIG_BLK_DEV_INITRD |
266 | initrd_start = INITRD_START; | 267 | initrd_start = INITRD_START; |
267 | if (initrd_start) { | 268 | if (initrd_start) { |
268 | extern void *move_initrd(unsigned long); | 269 | extern void *move_initrd(unsigned long); |
269 | 270 | ||
270 | initrd_end = initrd_start+INITRD_SIZE; | 271 | initrd_end = initrd_start+INITRD_SIZE; |
271 | printk("Initial ramdisk at: 0x%p (%lu bytes)\n", | 272 | printk("Initial ramdisk at: 0x%p (%lu bytes)\n", |
272 | (void *) initrd_start, INITRD_SIZE); | 273 | (void *) initrd_start, INITRD_SIZE); |
273 | 274 | ||
274 | if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) { | 275 | if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) { |
275 | if (!move_initrd(PFN_PHYS(max_low_pfn))) | 276 | if (!move_initrd(PFN_PHYS(max_low_pfn))) |
276 | printk("initrd extends beyond end of memory " | 277 | printk("initrd extends beyond end of memory " |
277 | "(0x%08lx > 0x%p)\ndisabling initrd\n", | 278 | "(0x%08lx > 0x%p)\ndisabling initrd\n", |
278 | initrd_end, | 279 | initrd_end, |
279 | phys_to_virt(PFN_PHYS(max_low_pfn))); | 280 | phys_to_virt(PFN_PHYS(max_low_pfn))); |
280 | } else { | 281 | } else { |
281 | nid = kvaddr_to_nid(initrd_start); | 282 | nid = kvaddr_to_nid(initrd_start); |
282 | reserve_bootmem_node(NODE_DATA(nid), | 283 | reserve_bootmem_node(NODE_DATA(nid), |
283 | virt_to_phys((void *)initrd_start), | 284 | virt_to_phys((void *)initrd_start), |
284 | INITRD_SIZE, BOOTMEM_DEFAULT); | 285 | INITRD_SIZE, BOOTMEM_DEFAULT); |
285 | } | 286 | } |
286 | } | 287 | } |
287 | #endif /* CONFIG_BLK_DEV_INITRD */ | 288 | #endif /* CONFIG_BLK_DEV_INITRD */ |
288 | } | 289 | } |
289 | 290 | ||
290 | void __init paging_init(void) | 291 | void __init paging_init(void) |
291 | { | 292 | { |
292 | unsigned int nid; | 293 | unsigned int nid; |
293 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; | 294 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; |
294 | unsigned long dma_local_pfn; | 295 | unsigned long dma_local_pfn; |
295 | 296 | ||
296 | /* | 297 | /* |
297 | * The old global MAX_DMA_ADDRESS per-arch API doesn't fit | 298 | * The old global MAX_DMA_ADDRESS per-arch API doesn't fit |
298 | * in the NUMA model, for now we convert it to a pfn and | 299 | * in the NUMA model, for now we convert it to a pfn and |
299 | * we interpret this pfn as a local per-node information. | 300 | * we interpret this pfn as a local per-node information. |
300 | * This issue isn't very important since none of these machines | 301 | * This issue isn't very important since none of these machines |
301 | * have legacy ISA slots anyways. | 302 | * have legacy ISA slots anyways. |
302 | */ | 303 | */ |
303 | dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | 304 | dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; |
304 | 305 | ||
305 | for_each_online_node(nid) { | 306 | for_each_online_node(nid) { |
306 | bootmem_data_t *bdata = &bootmem_node_data[nid]; | 307 | bootmem_data_t *bdata = &bootmem_node_data[nid]; |
307 | unsigned long start_pfn = bdata->node_min_pfn; | 308 | unsigned long start_pfn = bdata->node_min_pfn; |
308 | unsigned long end_pfn = bdata->node_low_pfn; | 309 | unsigned long end_pfn = bdata->node_low_pfn; |
309 | 310 | ||
310 | if (dma_local_pfn >= end_pfn - start_pfn) | 311 | if (dma_local_pfn >= end_pfn - start_pfn) |
311 | zones_size[ZONE_DMA] = end_pfn - start_pfn; | 312 | zones_size[ZONE_DMA] = end_pfn - start_pfn; |
312 | else { | 313 | else { |
313 | zones_size[ZONE_DMA] = dma_local_pfn; | 314 | zones_size[ZONE_DMA] = dma_local_pfn; |
314 | zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn; | 315 | zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn; |
315 | } | 316 | } |
316 | node_set_state(nid, N_NORMAL_MEMORY); | 317 | node_set_state(nid, N_NORMAL_MEMORY); |
317 | free_area_init_node(nid, zones_size, start_pfn, NULL); | 318 | free_area_init_node(nid, zones_size, start_pfn, NULL); |
318 | } | 319 | } |
319 | 320 | ||
320 | /* Initialize the kernel's ZERO_PGE. */ | 321 | /* Initialize the kernel's ZERO_PGE. */ |
321 | memset((void *)ZERO_PGE, 0, PAGE_SIZE); | 322 | memset((void *)ZERO_PGE, 0, PAGE_SIZE); |
322 | } | 323 | } |
323 | 324 | ||
324 | void __init mem_init(void) | 325 | void __init mem_init(void) |
325 | { | 326 | { |
326 | unsigned long codesize, reservedpages, datasize, initsize, pfn; | 327 | unsigned long codesize, reservedpages, datasize, initsize, pfn; |
327 | extern int page_is_ram(unsigned long) __init; | 328 | extern int page_is_ram(unsigned long) __init; |
328 | extern char _text, _etext, _data, _edata; | ||
329 | extern char __init_begin, __init_end; | ||
330 | unsigned long nid, i; | 329 | unsigned long nid, i; |
331 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); | 330 | high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); |
332 | 331 | ||
333 | reservedpages = 0; | 332 | reservedpages = 0; |
334 | for_each_online_node(nid) { | 333 | for_each_online_node(nid) { |
335 | /* | 334 | /* |
336 | * This will free up the bootmem, ie, slot 0 memory | 335 | * This will free up the bootmem, ie, slot 0 memory |
337 | */ | 336 | */ |
338 | totalram_pages += free_all_bootmem_node(NODE_DATA(nid)); | 337 | totalram_pages += free_all_bootmem_node(NODE_DATA(nid)); |
339 | 338 | ||
340 | pfn = NODE_DATA(nid)->node_start_pfn; | 339 | pfn = NODE_DATA(nid)->node_start_pfn; |
341 | for (i = 0; i < node_spanned_pages(nid); i++, pfn++) | 340 | for (i = 0; i < node_spanned_pages(nid); i++, pfn++) |
342 | if (page_is_ram(pfn) && | 341 | if (page_is_ram(pfn) && |
343 | PageReserved(nid_page_nr(nid, i))) | 342 | PageReserved(nid_page_nr(nid, i))) |
344 | reservedpages++; | 343 | reservedpages++; |
345 | } | 344 | } |
346 | 345 | ||
347 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 346 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
348 | datasize = (unsigned long) &_edata - (unsigned long) &_data; | 347 | datasize = (unsigned long) &_edata - (unsigned long) &_data; |
349 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | 348 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; |
350 | 349 | ||
351 | printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, " | 350 | printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, " |
352 | "%luk data, %luk init)\n", | 351 | "%luk data, %luk init)\n", |
353 | nr_free_pages() << (PAGE_SHIFT-10), | 352 | nr_free_pages() << (PAGE_SHIFT-10), |
354 | num_physpages << (PAGE_SHIFT-10), | 353 | num_physpages << (PAGE_SHIFT-10), |
355 | codesize >> 10, | 354 | codesize >> 10, |
356 | reservedpages << (PAGE_SHIFT-10), | 355 | reservedpages << (PAGE_SHIFT-10), |
357 | datasize >> 10, | 356 | datasize >> 10, |
358 | initsize >> 10); | 357 | initsize >> 10); |
359 | #if 0 | 358 | #if 0 |
360 | mem_stress(); | 359 | mem_stress(); |
361 | #endif | 360 | #endif |
362 | } | 361 | } |