Commit 911a62d42365076209e2c327e7688db296e35d62
Committed by: Linus Torvalds
Parent: 484b90c4b9
[PATCH] x86: automatically enable bigsmp when we have more than 8 CPUs
The i386 generic subarchitecture requires explicit DMI strings or a command-line option to enable bigsmp mode. The patch below removes that restriction and switches to bigsmp as soon as it finds more than 8 logical CPUs, Intel processors, and xAPIC support.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
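For orientation before the diff: the decision the new probe has to make can be sketched as below. This is an illustrative sketch only, not code from the patch; the helpers num_logical_cpus(), cpu_is_intel_with_xapic() and switch_to_bigsmp() are hypothetical stand-ins for the checks named in the commit message, and the real logic lives in the mach-generic files this commit touches.

/*
 * Hypothetical sketch of the auto-bigsmp decision described in the
 * commit message; none of these helpers exist under these names.
 */
static void __init maybe_enable_bigsmp(void)
{
	/* Do nothing if the user already forced a subarchitecture:
	 * explicit command-line selection still wins, as before. */
	if (apic_forced_on_cmdline())
		return;

	/* Flat logical APIC delivery can address at most 8 CPUs, so once
	 * more than 8 logical CPUs are enumerated, switch to bigsmp,
	 * provided these are Intel parts with xAPIC support. */
	if (num_logical_cpus() > 8 && cpu_is_intel_with_xapic())
		switch_to_bigsmp();
}

In the diff below, the hook for this check is the generic_bigsmp_probe() call added to acpi_process_madt() under CONFIG_X86_GENERICARCH, once the MADT LAPIC entries have been parsed.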
Showing 8 changed files with 47 additions and 2 deletions
arch/i386/kernel/acpi/boot.c
1 | /* | 1 | /* |
2 | * boot.c - Architecture-Specific Low-Level ACPI Boot Support | 2 | * boot.c - Architecture-Specific Low-Level ACPI Boot Support |
3 | * | 3 | * |
4 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | 4 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
5 | * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com> | 5 | * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com> |
6 | * | 6 | * |
7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 7 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
11 | * the Free Software Foundation; either version 2 of the License, or | 11 | * the Free Software Foundation; either version 2 of the License, or |
12 | * (at your option) any later version. | 12 | * (at your option) any later version. |
13 | * | 13 | * |
14 | * This program is distributed in the hope that it will be useful, | 14 | * This program is distributed in the hope that it will be useful, |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
17 | * GNU General Public License for more details. | 17 | * GNU General Public License for more details. |
18 | * | 18 | * |
19 | * You should have received a copy of the GNU General Public License | 19 | * You should have received a copy of the GNU General Public License |
20 | * along with this program; if not, write to the Free Software | 20 | * along with this program; if not, write to the Free Software |
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
22 | * | 22 | * |
23 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 23 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/config.h> | 27 | #include <linux/config.h> |
28 | #include <linux/acpi.h> | 28 | #include <linux/acpi.h> |
29 | #include <linux/efi.h> | 29 | #include <linux/efi.h> |
30 | #include <linux/irq.h> | 30 | #include <linux/irq.h> |
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/dmi.h> | 32 | #include <linux/dmi.h> |
33 | 33 | ||
34 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
35 | #include <asm/io_apic.h> | 35 | #include <asm/io_apic.h> |
36 | #include <asm/apic.h> | 36 | #include <asm/apic.h> |
37 | #include <asm/io.h> | 37 | #include <asm/io.h> |
38 | #include <asm/irq.h> | 38 | #include <asm/irq.h> |
39 | #include <asm/mpspec.h> | 39 | #include <asm/mpspec.h> |
40 | 40 | ||
41 | #ifdef CONFIG_X86_64 | 41 | #ifdef CONFIG_X86_64 |
42 | 42 | ||
43 | static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) { } | 43 | static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) { } |
44 | extern void __init clustered_apic_check(void); | 44 | extern void __init clustered_apic_check(void); |
45 | static inline int ioapic_setup_disabled(void) { return 0; } | 45 | static inline int ioapic_setup_disabled(void) { return 0; } |
46 | #include <asm/proto.h> | 46 | #include <asm/proto.h> |
47 | 47 | ||
48 | #else /* X86 */ | 48 | #else /* X86 */ |
49 | 49 | ||
50 | #ifdef CONFIG_X86_LOCAL_APIC | 50 | #ifdef CONFIG_X86_LOCAL_APIC |
51 | #include <mach_apic.h> | 51 | #include <mach_apic.h> |
52 | #include <mach_mpparse.h> | 52 | #include <mach_mpparse.h> |
53 | #endif /* CONFIG_X86_LOCAL_APIC */ | 53 | #endif /* CONFIG_X86_LOCAL_APIC */ |
54 | 54 | ||
55 | #endif /* X86 */ | 55 | #endif /* X86 */ |
56 | 56 | ||
57 | #define BAD_MADT_ENTRY(entry, end) ( \ | 57 | #define BAD_MADT_ENTRY(entry, end) ( \ |
58 | (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ | 58 | (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ |
59 | ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) | 59 | ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) |
60 | 60 | ||
61 | #define PREFIX "ACPI: " | 61 | #define PREFIX "ACPI: " |
62 | 62 | ||
63 | #ifdef CONFIG_ACPI_PCI | 63 | #ifdef CONFIG_ACPI_PCI |
64 | int acpi_noirq __initdata; /* skip ACPI IRQ initialization */ | 64 | int acpi_noirq __initdata; /* skip ACPI IRQ initialization */ |
65 | int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */ | 65 | int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */ |
66 | #else | 66 | #else |
67 | int acpi_noirq __initdata = 1; | 67 | int acpi_noirq __initdata = 1; |
68 | int acpi_pci_disabled __initdata = 1; | 68 | int acpi_pci_disabled __initdata = 1; |
69 | #endif | 69 | #endif |
70 | int acpi_ht __initdata = 1; /* enable HT */ | 70 | int acpi_ht __initdata = 1; /* enable HT */ |
71 | 71 | ||
72 | int acpi_lapic; | 72 | int acpi_lapic; |
73 | int acpi_ioapic; | 73 | int acpi_ioapic; |
74 | int acpi_strict; | 74 | int acpi_strict; |
75 | EXPORT_SYMBOL(acpi_strict); | 75 | EXPORT_SYMBOL(acpi_strict); |
76 | 76 | ||
77 | acpi_interrupt_flags acpi_sci_flags __initdata; | 77 | acpi_interrupt_flags acpi_sci_flags __initdata; |
78 | int acpi_sci_override_gsi __initdata; | 78 | int acpi_sci_override_gsi __initdata; |
79 | int acpi_skip_timer_override __initdata; | 79 | int acpi_skip_timer_override __initdata; |
80 | 80 | ||
81 | #ifdef CONFIG_X86_LOCAL_APIC | 81 | #ifdef CONFIG_X86_LOCAL_APIC |
82 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | 82 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | #ifndef __HAVE_ARCH_CMPXCHG | 85 | #ifndef __HAVE_ARCH_CMPXCHG |
86 | #warning ACPI uses CMPXCHG, i486 and later hardware | 86 | #warning ACPI uses CMPXCHG, i486 and later hardware |
87 | #endif | 87 | #endif |
88 | 88 | ||
89 | #define MAX_MADT_ENTRIES 256 | 89 | #define MAX_MADT_ENTRIES 256 |
90 | u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] = | 90 | u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] = |
91 | { [0 ... MAX_MADT_ENTRIES-1] = 0xff }; | 91 | { [0 ... MAX_MADT_ENTRIES-1] = 0xff }; |
92 | EXPORT_SYMBOL(x86_acpiid_to_apicid); | 92 | EXPORT_SYMBOL(x86_acpiid_to_apicid); |
93 | 93 | ||
94 | /* -------------------------------------------------------------------------- | 94 | /* -------------------------------------------------------------------------- |
95 | Boot-time Configuration | 95 | Boot-time Configuration |
96 | -------------------------------------------------------------------------- */ | 96 | -------------------------------------------------------------------------- */ |
97 | 97 | ||
98 | /* | 98 | /* |
99 | * The default interrupt routing model is PIC (8259). This gets | 99 | * The default interrupt routing model is PIC (8259). This gets |
100 | * overriden if IOAPICs are enumerated (below). | 100 | * overriden if IOAPICs are enumerated (below). |
101 | */ | 101 | */ |
102 | enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; | 102 | enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC; |
103 | 103 | ||
104 | #ifdef CONFIG_X86_64 | 104 | #ifdef CONFIG_X86_64 |
105 | 105 | ||
106 | /* rely on all ACPI tables being in the direct mapping */ | 106 | /* rely on all ACPI tables being in the direct mapping */ |
107 | char *__acpi_map_table(unsigned long phys_addr, unsigned long size) | 107 | char *__acpi_map_table(unsigned long phys_addr, unsigned long size) |
108 | { | 108 | { |
109 | if (!phys_addr || !size) | 109 | if (!phys_addr || !size) |
110 | return NULL; | 110 | return NULL; |
111 | 111 | ||
112 | if (phys_addr < (end_pfn_map << PAGE_SHIFT)) | 112 | if (phys_addr < (end_pfn_map << PAGE_SHIFT)) |
113 | return __va(phys_addr); | 113 | return __va(phys_addr); |
114 | 114 | ||
115 | return NULL; | 115 | return NULL; |
116 | } | 116 | } |
117 | 117 | ||
118 | #else | 118 | #else |
119 | 119 | ||
120 | /* | 120 | /* |
121 | * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, | 121 | * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END, |
122 | * to map the target physical address. The problem is that set_fixmap() | 122 | * to map the target physical address. The problem is that set_fixmap() |
123 | * provides a single page, and it is possible that the page is not | 123 | * provides a single page, and it is possible that the page is not |
124 | * sufficient. | 124 | * sufficient. |
125 | * By using this area, we can map up to MAX_IO_APICS pages temporarily, | 125 | * By using this area, we can map up to MAX_IO_APICS pages temporarily, |
126 | * i.e. until the next __va_range() call. | 126 | * i.e. until the next __va_range() call. |
127 | * | 127 | * |
128 | * Important Safety Note: The fixed I/O APIC page numbers are *subtracted* | 128 | * Important Safety Note: The fixed I/O APIC page numbers are *subtracted* |
129 | * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and | 129 | * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and |
130 | * count idx down while incrementing the phys address. | 130 | * count idx down while incrementing the phys address. |
131 | */ | 131 | */ |
132 | char *__acpi_map_table(unsigned long phys, unsigned long size) | 132 | char *__acpi_map_table(unsigned long phys, unsigned long size) |
133 | { | 133 | { |
134 | unsigned long base, offset, mapped_size; | 134 | unsigned long base, offset, mapped_size; |
135 | int idx; | 135 | int idx; |
136 | 136 | ||
137 | if (phys + size < 8*1024*1024) | 137 | if (phys + size < 8*1024*1024) |
138 | return __va(phys); | 138 | return __va(phys); |
139 | 139 | ||
140 | offset = phys & (PAGE_SIZE - 1); | 140 | offset = phys & (PAGE_SIZE - 1); |
141 | mapped_size = PAGE_SIZE - offset; | 141 | mapped_size = PAGE_SIZE - offset; |
142 | set_fixmap(FIX_ACPI_END, phys); | 142 | set_fixmap(FIX_ACPI_END, phys); |
143 | base = fix_to_virt(FIX_ACPI_END); | 143 | base = fix_to_virt(FIX_ACPI_END); |
144 | 144 | ||
145 | /* | 145 | /* |
146 | * Most cases can be covered by the below. | 146 | * Most cases can be covered by the below. |
147 | */ | 147 | */ |
148 | idx = FIX_ACPI_END; | 148 | idx = FIX_ACPI_END; |
149 | while (mapped_size < size) { | 149 | while (mapped_size < size) { |
150 | if (--idx < FIX_ACPI_BEGIN) | 150 | if (--idx < FIX_ACPI_BEGIN) |
151 | return NULL; /* cannot handle this */ | 151 | return NULL; /* cannot handle this */ |
152 | phys += PAGE_SIZE; | 152 | phys += PAGE_SIZE; |
153 | set_fixmap(idx, phys); | 153 | set_fixmap(idx, phys); |
154 | mapped_size += PAGE_SIZE; | 154 | mapped_size += PAGE_SIZE; |
155 | } | 155 | } |
156 | 156 | ||
157 | return ((unsigned char *) base + offset); | 157 | return ((unsigned char *) base + offset); |
158 | } | 158 | } |
159 | #endif | 159 | #endif |
160 | 160 | ||
161 | #ifdef CONFIG_PCI_MMCONFIG | 161 | #ifdef CONFIG_PCI_MMCONFIG |
162 | /* The physical address of the MMCONFIG aperture. Set from ACPI tables. */ | 162 | /* The physical address of the MMCONFIG aperture. Set from ACPI tables. */ |
163 | struct acpi_table_mcfg_config *pci_mmcfg_config; | 163 | struct acpi_table_mcfg_config *pci_mmcfg_config; |
164 | int pci_mmcfg_config_num; | 164 | int pci_mmcfg_config_num; |
165 | 165 | ||
166 | int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size) | 166 | int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size) |
167 | { | 167 | { |
168 | struct acpi_table_mcfg *mcfg; | 168 | struct acpi_table_mcfg *mcfg; |
169 | unsigned long i; | 169 | unsigned long i; |
170 | int config_size; | 170 | int config_size; |
171 | 171 | ||
172 | if (!phys_addr || !size) | 172 | if (!phys_addr || !size) |
173 | return -EINVAL; | 173 | return -EINVAL; |
174 | 174 | ||
175 | mcfg = (struct acpi_table_mcfg *) __acpi_map_table(phys_addr, size); | 175 | mcfg = (struct acpi_table_mcfg *) __acpi_map_table(phys_addr, size); |
176 | if (!mcfg) { | 176 | if (!mcfg) { |
177 | printk(KERN_WARNING PREFIX "Unable to map MCFG\n"); | 177 | printk(KERN_WARNING PREFIX "Unable to map MCFG\n"); |
178 | return -ENODEV; | 178 | return -ENODEV; |
179 | } | 179 | } |
180 | 180 | ||
181 | /* how many config structures do we have */ | 181 | /* how many config structures do we have */ |
182 | pci_mmcfg_config_num = 0; | 182 | pci_mmcfg_config_num = 0; |
183 | i = size - sizeof(struct acpi_table_mcfg); | 183 | i = size - sizeof(struct acpi_table_mcfg); |
184 | while (i >= sizeof(struct acpi_table_mcfg_config)) { | 184 | while (i >= sizeof(struct acpi_table_mcfg_config)) { |
185 | ++pci_mmcfg_config_num; | 185 | ++pci_mmcfg_config_num; |
186 | i -= sizeof(struct acpi_table_mcfg_config); | 186 | i -= sizeof(struct acpi_table_mcfg_config); |
187 | }; | 187 | }; |
188 | if (pci_mmcfg_config_num == 0) { | 188 | if (pci_mmcfg_config_num == 0) { |
189 | printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); | 189 | printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); |
190 | return -ENODEV; | 190 | return -ENODEV; |
191 | } | 191 | } |
192 | 192 | ||
193 | config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config); | 193 | config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config); |
194 | pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL); | 194 | pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL); |
195 | if (!pci_mmcfg_config) { | 195 | if (!pci_mmcfg_config) { |
196 | printk(KERN_WARNING PREFIX | 196 | printk(KERN_WARNING PREFIX |
197 | "No memory for MCFG config tables\n"); | 197 | "No memory for MCFG config tables\n"); |
198 | return -ENOMEM; | 198 | return -ENOMEM; |
199 | } | 199 | } |
200 | 200 | ||
201 | memcpy(pci_mmcfg_config, &mcfg->config, config_size); | 201 | memcpy(pci_mmcfg_config, &mcfg->config, config_size); |
202 | for (i = 0; i < pci_mmcfg_config_num; ++i) { | 202 | for (i = 0; i < pci_mmcfg_config_num; ++i) { |
203 | if (mcfg->config[i].base_reserved) { | 203 | if (mcfg->config[i].base_reserved) { |
204 | printk(KERN_ERR PREFIX | 204 | printk(KERN_ERR PREFIX |
205 | "MMCONFIG not in low 4GB of memory\n"); | 205 | "MMCONFIG not in low 4GB of memory\n"); |
206 | return -ENODEV; | 206 | return -ENODEV; |
207 | } | 207 | } |
208 | } | 208 | } |
209 | 209 | ||
210 | return 0; | 210 | return 0; |
211 | } | 211 | } |
212 | #endif /* CONFIG_PCI_MMCONFIG */ | 212 | #endif /* CONFIG_PCI_MMCONFIG */ |
213 | 213 | ||
214 | #ifdef CONFIG_X86_LOCAL_APIC | 214 | #ifdef CONFIG_X86_LOCAL_APIC |
215 | static int __init | 215 | static int __init |
216 | acpi_parse_madt ( | 216 | acpi_parse_madt ( |
217 | unsigned long phys_addr, | 217 | unsigned long phys_addr, |
218 | unsigned long size) | 218 | unsigned long size) |
219 | { | 219 | { |
220 | struct acpi_table_madt *madt = NULL; | 220 | struct acpi_table_madt *madt = NULL; |
221 | 221 | ||
222 | if (!phys_addr || !size) | 222 | if (!phys_addr || !size) |
223 | return -EINVAL; | 223 | return -EINVAL; |
224 | 224 | ||
225 | madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size); | 225 | madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size); |
226 | if (!madt) { | 226 | if (!madt) { |
227 | printk(KERN_WARNING PREFIX "Unable to map MADT\n"); | 227 | printk(KERN_WARNING PREFIX "Unable to map MADT\n"); |
228 | return -ENODEV; | 228 | return -ENODEV; |
229 | } | 229 | } |
230 | 230 | ||
231 | if (madt->lapic_address) { | 231 | if (madt->lapic_address) { |
232 | acpi_lapic_addr = (u64) madt->lapic_address; | 232 | acpi_lapic_addr = (u64) madt->lapic_address; |
233 | 233 | ||
234 | printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", | 234 | printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", |
235 | madt->lapic_address); | 235 | madt->lapic_address); |
236 | } | 236 | } |
237 | 237 | ||
238 | acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); | 238 | acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); |
239 | 239 | ||
240 | return 0; | 240 | return 0; |
241 | } | 241 | } |
242 | 242 | ||
243 | 243 | ||
244 | static int __init | 244 | static int __init |
245 | acpi_parse_lapic ( | 245 | acpi_parse_lapic ( |
246 | acpi_table_entry_header *header, const unsigned long end) | 246 | acpi_table_entry_header *header, const unsigned long end) |
247 | { | 247 | { |
248 | struct acpi_table_lapic *processor = NULL; | 248 | struct acpi_table_lapic *processor = NULL; |
249 | 249 | ||
250 | processor = (struct acpi_table_lapic*) header; | 250 | processor = (struct acpi_table_lapic*) header; |
251 | 251 | ||
252 | if (BAD_MADT_ENTRY(processor, end)) | 252 | if (BAD_MADT_ENTRY(processor, end)) |
253 | return -EINVAL; | 253 | return -EINVAL; |
254 | 254 | ||
255 | acpi_table_print_madt_entry(header); | 255 | acpi_table_print_madt_entry(header); |
256 | 256 | ||
257 | /* no utility in registering a disabled processor */ | 257 | /* no utility in registering a disabled processor */ |
258 | if (processor->flags.enabled == 0) | 258 | if (processor->flags.enabled == 0) |
259 | return 0; | 259 | return 0; |
260 | 260 | ||
261 | x86_acpiid_to_apicid[processor->acpi_id] = processor->id; | 261 | x86_acpiid_to_apicid[processor->acpi_id] = processor->id; |
262 | 262 | ||
263 | mp_register_lapic ( | 263 | mp_register_lapic ( |
264 | processor->id, /* APIC ID */ | 264 | processor->id, /* APIC ID */ |
265 | processor->flags.enabled); /* Enabled? */ | 265 | processor->flags.enabled); /* Enabled? */ |
266 | 266 | ||
267 | return 0; | 267 | return 0; |
268 | } | 268 | } |
269 | 269 | ||
270 | static int __init | 270 | static int __init |
271 | acpi_parse_lapic_addr_ovr ( | 271 | acpi_parse_lapic_addr_ovr ( |
272 | acpi_table_entry_header *header, const unsigned long end) | 272 | acpi_table_entry_header *header, const unsigned long end) |
273 | { | 273 | { |
274 | struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL; | 274 | struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL; |
275 | 275 | ||
276 | lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header; | 276 | lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header; |
277 | 277 | ||
278 | if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) | 278 | if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) |
279 | return -EINVAL; | 279 | return -EINVAL; |
280 | 280 | ||
281 | acpi_lapic_addr = lapic_addr_ovr->address; | 281 | acpi_lapic_addr = lapic_addr_ovr->address; |
282 | 282 | ||
283 | return 0; | 283 | return 0; |
284 | } | 284 | } |
285 | 285 | ||
286 | static int __init | 286 | static int __init |
287 | acpi_parse_lapic_nmi ( | 287 | acpi_parse_lapic_nmi ( |
288 | acpi_table_entry_header *header, const unsigned long end) | 288 | acpi_table_entry_header *header, const unsigned long end) |
289 | { | 289 | { |
290 | struct acpi_table_lapic_nmi *lapic_nmi = NULL; | 290 | struct acpi_table_lapic_nmi *lapic_nmi = NULL; |
291 | 291 | ||
292 | lapic_nmi = (struct acpi_table_lapic_nmi*) header; | 292 | lapic_nmi = (struct acpi_table_lapic_nmi*) header; |
293 | 293 | ||
294 | if (BAD_MADT_ENTRY(lapic_nmi, end)) | 294 | if (BAD_MADT_ENTRY(lapic_nmi, end)) |
295 | return -EINVAL; | 295 | return -EINVAL; |
296 | 296 | ||
297 | acpi_table_print_madt_entry(header); | 297 | acpi_table_print_madt_entry(header); |
298 | 298 | ||
299 | if (lapic_nmi->lint != 1) | 299 | if (lapic_nmi->lint != 1) |
300 | printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); | 300 | printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n"); |
301 | 301 | ||
302 | return 0; | 302 | return 0; |
303 | } | 303 | } |
304 | 304 | ||
305 | 305 | ||
306 | #endif /*CONFIG_X86_LOCAL_APIC*/ | 306 | #endif /*CONFIG_X86_LOCAL_APIC*/ |
307 | 307 | ||
308 | #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER) | 308 | #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER) |
309 | 309 | ||
310 | static int __init | 310 | static int __init |
311 | acpi_parse_ioapic ( | 311 | acpi_parse_ioapic ( |
312 | acpi_table_entry_header *header, const unsigned long end) | 312 | acpi_table_entry_header *header, const unsigned long end) |
313 | { | 313 | { |
314 | struct acpi_table_ioapic *ioapic = NULL; | 314 | struct acpi_table_ioapic *ioapic = NULL; |
315 | 315 | ||
316 | ioapic = (struct acpi_table_ioapic*) header; | 316 | ioapic = (struct acpi_table_ioapic*) header; |
317 | 317 | ||
318 | if (BAD_MADT_ENTRY(ioapic, end)) | 318 | if (BAD_MADT_ENTRY(ioapic, end)) |
319 | return -EINVAL; | 319 | return -EINVAL; |
320 | 320 | ||
321 | acpi_table_print_madt_entry(header); | 321 | acpi_table_print_madt_entry(header); |
322 | 322 | ||
323 | mp_register_ioapic ( | 323 | mp_register_ioapic ( |
324 | ioapic->id, | 324 | ioapic->id, |
325 | ioapic->address, | 325 | ioapic->address, |
326 | ioapic->global_irq_base); | 326 | ioapic->global_irq_base); |
327 | 327 | ||
328 | return 0; | 328 | return 0; |
329 | } | 329 | } |
330 | 330 | ||
331 | /* | 331 | /* |
332 | * Parse Interrupt Source Override for the ACPI SCI | 332 | * Parse Interrupt Source Override for the ACPI SCI |
333 | */ | 333 | */ |
334 | static void | 334 | static void |
335 | acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) | 335 | acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger) |
336 | { | 336 | { |
337 | if (trigger == 0) /* compatible SCI trigger is level */ | 337 | if (trigger == 0) /* compatible SCI trigger is level */ |
338 | trigger = 3; | 338 | trigger = 3; |
339 | 339 | ||
340 | if (polarity == 0) /* compatible SCI polarity is low */ | 340 | if (polarity == 0) /* compatible SCI polarity is low */ |
341 | polarity = 3; | 341 | polarity = 3; |
342 | 342 | ||
343 | /* Command-line over-ride via acpi_sci= */ | 343 | /* Command-line over-ride via acpi_sci= */ |
344 | if (acpi_sci_flags.trigger) | 344 | if (acpi_sci_flags.trigger) |
345 | trigger = acpi_sci_flags.trigger; | 345 | trigger = acpi_sci_flags.trigger; |
346 | 346 | ||
347 | if (acpi_sci_flags.polarity) | 347 | if (acpi_sci_flags.polarity) |
348 | polarity = acpi_sci_flags.polarity; | 348 | polarity = acpi_sci_flags.polarity; |
349 | 349 | ||
350 | /* | 350 | /* |
351 | * mp_config_acpi_legacy_irqs() already setup IRQs < 16 | 351 | * mp_config_acpi_legacy_irqs() already setup IRQs < 16 |
352 | * If GSI is < 16, this will update its flags, | 352 | * If GSI is < 16, this will update its flags, |
353 | * else it will create a new mp_irqs[] entry. | 353 | * else it will create a new mp_irqs[] entry. |
354 | */ | 354 | */ |
355 | mp_override_legacy_irq(gsi, polarity, trigger, gsi); | 355 | mp_override_legacy_irq(gsi, polarity, trigger, gsi); |
356 | 356 | ||
357 | /* | 357 | /* |
358 | * stash over-ride to indicate we've been here | 358 | * stash over-ride to indicate we've been here |
359 | * and for later update of acpi_fadt | 359 | * and for later update of acpi_fadt |
360 | */ | 360 | */ |
361 | acpi_sci_override_gsi = gsi; | 361 | acpi_sci_override_gsi = gsi; |
362 | return; | 362 | return; |
363 | } | 363 | } |
364 | 364 | ||
365 | static int __init | 365 | static int __init |
366 | acpi_parse_int_src_ovr ( | 366 | acpi_parse_int_src_ovr ( |
367 | acpi_table_entry_header *header, const unsigned long end) | 367 | acpi_table_entry_header *header, const unsigned long end) |
368 | { | 368 | { |
369 | struct acpi_table_int_src_ovr *intsrc = NULL; | 369 | struct acpi_table_int_src_ovr *intsrc = NULL; |
370 | 370 | ||
371 | intsrc = (struct acpi_table_int_src_ovr*) header; | 371 | intsrc = (struct acpi_table_int_src_ovr*) header; |
372 | 372 | ||
373 | if (BAD_MADT_ENTRY(intsrc, end)) | 373 | if (BAD_MADT_ENTRY(intsrc, end)) |
374 | return -EINVAL; | 374 | return -EINVAL; |
375 | 375 | ||
376 | acpi_table_print_madt_entry(header); | 376 | acpi_table_print_madt_entry(header); |
377 | 377 | ||
378 | if (intsrc->bus_irq == acpi_fadt.sci_int) { | 378 | if (intsrc->bus_irq == acpi_fadt.sci_int) { |
379 | acpi_sci_ioapic_setup(intsrc->global_irq, | 379 | acpi_sci_ioapic_setup(intsrc->global_irq, |
380 | intsrc->flags.polarity, intsrc->flags.trigger); | 380 | intsrc->flags.polarity, intsrc->flags.trigger); |
381 | return 0; | 381 | return 0; |
382 | } | 382 | } |
383 | 383 | ||
384 | if (acpi_skip_timer_override && | 384 | if (acpi_skip_timer_override && |
385 | intsrc->bus_irq == 0 && intsrc->global_irq == 2) { | 385 | intsrc->bus_irq == 0 && intsrc->global_irq == 2) { |
386 | printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); | 386 | printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); |
387 | return 0; | 387 | return 0; |
388 | } | 388 | } |
389 | 389 | ||
390 | mp_override_legacy_irq ( | 390 | mp_override_legacy_irq ( |
391 | intsrc->bus_irq, | 391 | intsrc->bus_irq, |
392 | intsrc->flags.polarity, | 392 | intsrc->flags.polarity, |
393 | intsrc->flags.trigger, | 393 | intsrc->flags.trigger, |
394 | intsrc->global_irq); | 394 | intsrc->global_irq); |
395 | 395 | ||
396 | return 0; | 396 | return 0; |
397 | } | 397 | } |
398 | 398 | ||
399 | 399 | ||
400 | static int __init | 400 | static int __init |
401 | acpi_parse_nmi_src ( | 401 | acpi_parse_nmi_src ( |
402 | acpi_table_entry_header *header, const unsigned long end) | 402 | acpi_table_entry_header *header, const unsigned long end) |
403 | { | 403 | { |
404 | struct acpi_table_nmi_src *nmi_src = NULL; | 404 | struct acpi_table_nmi_src *nmi_src = NULL; |
405 | 405 | ||
406 | nmi_src = (struct acpi_table_nmi_src*) header; | 406 | nmi_src = (struct acpi_table_nmi_src*) header; |
407 | 407 | ||
408 | if (BAD_MADT_ENTRY(nmi_src, end)) | 408 | if (BAD_MADT_ENTRY(nmi_src, end)) |
409 | return -EINVAL; | 409 | return -EINVAL; |
410 | 410 | ||
411 | acpi_table_print_madt_entry(header); | 411 | acpi_table_print_madt_entry(header); |
412 | 412 | ||
413 | /* TBD: Support nimsrc entries? */ | 413 | /* TBD: Support nimsrc entries? */ |
414 | 414 | ||
415 | return 0; | 415 | return 0; |
416 | } | 416 | } |
417 | 417 | ||
418 | #endif /* CONFIG_X86_IO_APIC */ | 418 | #endif /* CONFIG_X86_IO_APIC */ |
419 | 419 | ||
420 | #ifdef CONFIG_ACPI_BUS | 420 | #ifdef CONFIG_ACPI_BUS |
421 | 421 | ||
422 | /* | 422 | /* |
423 | * acpi_pic_sci_set_trigger() | 423 | * acpi_pic_sci_set_trigger() |
424 | * | 424 | * |
425 | * use ELCR to set PIC-mode trigger type for SCI | 425 | * use ELCR to set PIC-mode trigger type for SCI |
426 | * | 426 | * |
427 | * If a PIC-mode SCI is not recognized or gives spurious IRQ7's | 427 | * If a PIC-mode SCI is not recognized or gives spurious IRQ7's |
428 | * it may require Edge Trigger -- use "acpi_sci=edge" | 428 | * it may require Edge Trigger -- use "acpi_sci=edge" |
429 | * | 429 | * |
430 | * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers | 430 | * Port 0x4d0-4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers |
431 | * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. | 431 | * for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge. |
432 | * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0) | 432 | * ECLR1 is IRQ's 0-7 (IRQ 0, 1, 2 must be 0) |
433 | * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0) | 433 | * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0) |
434 | */ | 434 | */ |
435 | 435 | ||
436 | void __init | 436 | void __init |
437 | acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) | 437 | acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger) |
438 | { | 438 | { |
439 | unsigned int mask = 1 << irq; | 439 | unsigned int mask = 1 << irq; |
440 | unsigned int old, new; | 440 | unsigned int old, new; |
441 | 441 | ||
442 | /* Real old ELCR mask */ | 442 | /* Real old ELCR mask */ |
443 | old = inb(0x4d0) | (inb(0x4d1) << 8); | 443 | old = inb(0x4d0) | (inb(0x4d1) << 8); |
444 | 444 | ||
445 | /* | 445 | /* |
446 | * If we use ACPI to set PCI irq's, then we should clear ELCR | 446 | * If we use ACPI to set PCI irq's, then we should clear ELCR |
447 | * since we will set it correctly as we enable the PCI irq | 447 | * since we will set it correctly as we enable the PCI irq |
448 | * routing. | 448 | * routing. |
449 | */ | 449 | */ |
450 | new = acpi_noirq ? old : 0; | 450 | new = acpi_noirq ? old : 0; |
451 | 451 | ||
452 | /* | 452 | /* |
453 | * Update SCI information in the ELCR, it isn't in the PCI | 453 | * Update SCI information in the ELCR, it isn't in the PCI |
454 | * routing tables.. | 454 | * routing tables.. |
455 | */ | 455 | */ |
456 | switch (trigger) { | 456 | switch (trigger) { |
457 | case 1: /* Edge - clear */ | 457 | case 1: /* Edge - clear */ |
458 | new &= ~mask; | 458 | new &= ~mask; |
459 | break; | 459 | break; |
460 | case 3: /* Level - set */ | 460 | case 3: /* Level - set */ |
461 | new |= mask; | 461 | new |= mask; |
462 | break; | 462 | break; |
463 | } | 463 | } |
464 | 464 | ||
465 | if (old == new) | 465 | if (old == new) |
466 | return; | 466 | return; |
467 | 467 | ||
468 | printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old); | 468 | printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old); |
469 | outb(new, 0x4d0); | 469 | outb(new, 0x4d0); |
470 | outb(new >> 8, 0x4d1); | 470 | outb(new >> 8, 0x4d1); |
471 | } | 471 | } |
472 | 472 | ||
473 | 473 | ||
474 | #endif /* CONFIG_ACPI_BUS */ | 474 | #endif /* CONFIG_ACPI_BUS */ |
475 | 475 | ||
476 | int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) | 476 | int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) |
477 | { | 477 | { |
478 | #ifdef CONFIG_X86_IO_APIC | 478 | #ifdef CONFIG_X86_IO_APIC |
479 | if (use_pci_vector() && !platform_legacy_irq(gsi)) | 479 | if (use_pci_vector() && !platform_legacy_irq(gsi)) |
480 | *irq = IO_APIC_VECTOR(gsi); | 480 | *irq = IO_APIC_VECTOR(gsi); |
481 | else | 481 | else |
482 | #endif | 482 | #endif |
483 | *irq = gsi; | 483 | *irq = gsi; |
484 | return 0; | 484 | return 0; |
485 | } | 485 | } |
486 | 486 | ||
487 | unsigned int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low) | 487 | unsigned int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low) |
488 | { | 488 | { |
489 | unsigned int irq; | 489 | unsigned int irq; |
490 | unsigned int plat_gsi = gsi; | 490 | unsigned int plat_gsi = gsi; |
491 | 491 | ||
492 | #ifdef CONFIG_PCI | 492 | #ifdef CONFIG_PCI |
493 | /* | 493 | /* |
494 | * Make sure all (legacy) PCI IRQs are set as level-triggered. | 494 | * Make sure all (legacy) PCI IRQs are set as level-triggered. |
495 | */ | 495 | */ |
496 | if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { | 496 | if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { |
497 | extern void eisa_set_level_irq(unsigned int irq); | 497 | extern void eisa_set_level_irq(unsigned int irq); |
498 | 498 | ||
499 | if (edge_level == ACPI_LEVEL_SENSITIVE) | 499 | if (edge_level == ACPI_LEVEL_SENSITIVE) |
500 | eisa_set_level_irq(gsi); | 500 | eisa_set_level_irq(gsi); |
501 | } | 501 | } |
502 | #endif | 502 | #endif |
503 | 503 | ||
504 | #ifdef CONFIG_X86_IO_APIC | 504 | #ifdef CONFIG_X86_IO_APIC |
505 | if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) { | 505 | if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) { |
506 | plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low); | 506 | plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low); |
507 | } | 507 | } |
508 | #endif | 508 | #endif |
509 | acpi_gsi_to_irq(plat_gsi, &irq); | 509 | acpi_gsi_to_irq(plat_gsi, &irq); |
510 | return irq; | 510 | return irq; |
511 | } | 511 | } |
512 | EXPORT_SYMBOL(acpi_register_gsi); | 512 | EXPORT_SYMBOL(acpi_register_gsi); |
513 | 513 | ||
514 | /* | 514 | /* |
515 | * ACPI based hotplug support for CPU | 515 | * ACPI based hotplug support for CPU |
516 | */ | 516 | */ |
517 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 517 | #ifdef CONFIG_ACPI_HOTPLUG_CPU |
518 | int | 518 | int |
519 | acpi_map_lsapic(acpi_handle handle, int *pcpu) | 519 | acpi_map_lsapic(acpi_handle handle, int *pcpu) |
520 | { | 520 | { |
521 | /* TBD */ | 521 | /* TBD */ |
522 | return -EINVAL; | 522 | return -EINVAL; |
523 | } | 523 | } |
524 | EXPORT_SYMBOL(acpi_map_lsapic); | 524 | EXPORT_SYMBOL(acpi_map_lsapic); |
525 | 525 | ||
526 | 526 | ||
527 | int | 527 | int |
528 | acpi_unmap_lsapic(int cpu) | 528 | acpi_unmap_lsapic(int cpu) |
529 | { | 529 | { |
530 | /* TBD */ | 530 | /* TBD */ |
531 | return -EINVAL; | 531 | return -EINVAL; |
532 | } | 532 | } |
533 | EXPORT_SYMBOL(acpi_unmap_lsapic); | 533 | EXPORT_SYMBOL(acpi_unmap_lsapic); |
534 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ | 534 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ |
535 | 535 | ||
536 | int | 536 | int |
537 | acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) | 537 | acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) |
538 | { | 538 | { |
539 | /* TBD */ | 539 | /* TBD */ |
540 | return -EINVAL; | 540 | return -EINVAL; |
541 | } | 541 | } |
542 | EXPORT_SYMBOL(acpi_register_ioapic); | 542 | EXPORT_SYMBOL(acpi_register_ioapic); |
543 | 543 | ||
544 | int | 544 | int |
545 | acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) | 545 | acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) |
546 | { | 546 | { |
547 | /* TBD */ | 547 | /* TBD */ |
548 | return -EINVAL; | 548 | return -EINVAL; |
549 | } | 549 | } |
550 | EXPORT_SYMBOL(acpi_unregister_ioapic); | 550 | EXPORT_SYMBOL(acpi_unregister_ioapic); |
551 | 551 | ||
552 | static unsigned long __init | 552 | static unsigned long __init |
553 | acpi_scan_rsdp ( | 553 | acpi_scan_rsdp ( |
554 | unsigned long start, | 554 | unsigned long start, |
555 | unsigned long length) | 555 | unsigned long length) |
556 | { | 556 | { |
557 | unsigned long offset = 0; | 557 | unsigned long offset = 0; |
558 | unsigned long sig_len = sizeof("RSD PTR ") - 1; | 558 | unsigned long sig_len = sizeof("RSD PTR ") - 1; |
559 | 559 | ||
560 | /* | 560 | /* |
561 | * Scan all 16-byte boundaries of the physical memory region for the | 561 | * Scan all 16-byte boundaries of the physical memory region for the |
562 | * RSDP signature. | 562 | * RSDP signature. |
563 | */ | 563 | */ |
564 | for (offset = 0; offset < length; offset += 16) { | 564 | for (offset = 0; offset < length; offset += 16) { |
565 | if (strncmp((char *) (start + offset), "RSD PTR ", sig_len)) | 565 | if (strncmp((char *) (start + offset), "RSD PTR ", sig_len)) |
566 | continue; | 566 | continue; |
567 | return (start + offset); | 567 | return (start + offset); |
568 | } | 568 | } |
569 | 569 | ||
570 | return 0; | 570 | return 0; |
571 | } | 571 | } |
572 | 572 | ||
573 | static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size) | 573 | static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size) |
574 | { | 574 | { |
575 | struct acpi_table_sbf *sb; | 575 | struct acpi_table_sbf *sb; |
576 | 576 | ||
577 | if (!phys_addr || !size) | 577 | if (!phys_addr || !size) |
578 | return -EINVAL; | 578 | return -EINVAL; |
579 | 579 | ||
580 | sb = (struct acpi_table_sbf *) __acpi_map_table(phys_addr, size); | 580 | sb = (struct acpi_table_sbf *) __acpi_map_table(phys_addr, size); |
581 | if (!sb) { | 581 | if (!sb) { |
582 | printk(KERN_WARNING PREFIX "Unable to map SBF\n"); | 582 | printk(KERN_WARNING PREFIX "Unable to map SBF\n"); |
583 | return -ENODEV; | 583 | return -ENODEV; |
584 | } | 584 | } |
585 | 585 | ||
586 | sbf_port = sb->sbf_cmos; /* Save CMOS port */ | 586 | sbf_port = sb->sbf_cmos; /* Save CMOS port */ |
587 | 587 | ||
588 | return 0; | 588 | return 0; |
589 | } | 589 | } |
590 | 590 | ||
591 | 591 | ||
592 | #ifdef CONFIG_HPET_TIMER | 592 | #ifdef CONFIG_HPET_TIMER |
593 | 593 | ||
594 | static int __init acpi_parse_hpet(unsigned long phys, unsigned long size) | 594 | static int __init acpi_parse_hpet(unsigned long phys, unsigned long size) |
595 | { | 595 | { |
596 | struct acpi_table_hpet *hpet_tbl; | 596 | struct acpi_table_hpet *hpet_tbl; |
597 | 597 | ||
598 | if (!phys || !size) | 598 | if (!phys || !size) |
599 | return -EINVAL; | 599 | return -EINVAL; |
600 | 600 | ||
601 | hpet_tbl = (struct acpi_table_hpet *) __acpi_map_table(phys, size); | 601 | hpet_tbl = (struct acpi_table_hpet *) __acpi_map_table(phys, size); |
602 | if (!hpet_tbl) { | 602 | if (!hpet_tbl) { |
603 | printk(KERN_WARNING PREFIX "Unable to map HPET\n"); | 603 | printk(KERN_WARNING PREFIX "Unable to map HPET\n"); |
604 | return -ENODEV; | 604 | return -ENODEV; |
605 | } | 605 | } |
606 | 606 | ||
607 | if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) { | 607 | if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) { |
608 | printk(KERN_WARNING PREFIX "HPET timers must be located in " | 608 | printk(KERN_WARNING PREFIX "HPET timers must be located in " |
609 | "memory.\n"); | 609 | "memory.\n"); |
610 | return -1; | 610 | return -1; |
611 | } | 611 | } |
612 | 612 | ||
613 | #ifdef CONFIG_X86_64 | 613 | #ifdef CONFIG_X86_64 |
614 | vxtime.hpet_address = hpet_tbl->addr.addrl | | 614 | vxtime.hpet_address = hpet_tbl->addr.addrl | |
615 | ((long) hpet_tbl->addr.addrh << 32); | 615 | ((long) hpet_tbl->addr.addrh << 32); |
616 | 616 | ||
617 | printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", | 617 | printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", |
618 | hpet_tbl->id, vxtime.hpet_address); | 618 | hpet_tbl->id, vxtime.hpet_address); |
619 | #else /* X86 */ | 619 | #else /* X86 */ |
620 | { | 620 | { |
621 | extern unsigned long hpet_address; | 621 | extern unsigned long hpet_address; |
622 | 622 | ||
623 | hpet_address = hpet_tbl->addr.addrl; | 623 | hpet_address = hpet_tbl->addr.addrl; |
624 | printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", | 624 | printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", |
625 | hpet_tbl->id, hpet_address); | 625 | hpet_tbl->id, hpet_address); |
626 | } | 626 | } |
627 | #endif /* X86 */ | 627 | #endif /* X86 */ |
628 | 628 | ||
629 | return 0; | 629 | return 0; |
630 | } | 630 | } |
631 | #else | 631 | #else |
632 | #define acpi_parse_hpet NULL | 632 | #define acpi_parse_hpet NULL |
633 | #endif | 633 | #endif |
634 | 634 | ||
635 | #ifdef CONFIG_X86_PM_TIMER | 635 | #ifdef CONFIG_X86_PM_TIMER |
636 | extern u32 pmtmr_ioport; | 636 | extern u32 pmtmr_ioport; |
637 | #endif | 637 | #endif |
638 | 638 | ||
639 | static int __init acpi_parse_fadt(unsigned long phys, unsigned long size) | 639 | static int __init acpi_parse_fadt(unsigned long phys, unsigned long size) |
640 | { | 640 | { |
641 | struct fadt_descriptor_rev2 *fadt = NULL; | 641 | struct fadt_descriptor_rev2 *fadt = NULL; |
642 | 642 | ||
643 | fadt = (struct fadt_descriptor_rev2*) __acpi_map_table(phys,size); | 643 | fadt = (struct fadt_descriptor_rev2*) __acpi_map_table(phys,size); |
644 | if(!fadt) { | 644 | if(!fadt) { |
645 | printk(KERN_WARNING PREFIX "Unable to map FADT\n"); | 645 | printk(KERN_WARNING PREFIX "Unable to map FADT\n"); |
646 | return 0; | 646 | return 0; |
647 | } | 647 | } |
648 | 648 | ||
649 | #ifdef CONFIG_ACPI_INTERPRETER | 649 | #ifdef CONFIG_ACPI_INTERPRETER |
650 | /* initialize sci_int early for INT_SRC_OVR MADT parsing */ | 650 | /* initialize sci_int early for INT_SRC_OVR MADT parsing */ |
651 | acpi_fadt.sci_int = fadt->sci_int; | 651 | acpi_fadt.sci_int = fadt->sci_int; |
652 | #endif | 652 | #endif |
653 | 653 | ||
654 | #ifdef CONFIG_ACPI_BUS | 654 | #ifdef CONFIG_ACPI_BUS |
655 | /* initialize rev and apic_phys_dest_mode for x86_64 genapic */ | 655 | /* initialize rev and apic_phys_dest_mode for x86_64 genapic */ |
656 | acpi_fadt.revision = fadt->revision; | 656 | acpi_fadt.revision = fadt->revision; |
657 | acpi_fadt.force_apic_physical_destination_mode = fadt->force_apic_physical_destination_mode; | 657 | acpi_fadt.force_apic_physical_destination_mode = fadt->force_apic_physical_destination_mode; |
658 | #endif | 658 | #endif |
659 | 659 | ||
660 | #ifdef CONFIG_X86_PM_TIMER | 660 | #ifdef CONFIG_X86_PM_TIMER |
661 | /* detect the location of the ACPI PM Timer */ | 661 | /* detect the location of the ACPI PM Timer */ |
662 | if (fadt->revision >= FADT2_REVISION_ID) { | 662 | if (fadt->revision >= FADT2_REVISION_ID) { |
663 | /* FADT rev. 2 */ | 663 | /* FADT rev. 2 */ |
664 | if (fadt->xpm_tmr_blk.address_space_id != ACPI_ADR_SPACE_SYSTEM_IO) | 664 | if (fadt->xpm_tmr_blk.address_space_id != ACPI_ADR_SPACE_SYSTEM_IO) |
665 | return 0; | 665 | return 0; |
666 | 666 | ||
667 | pmtmr_ioport = fadt->xpm_tmr_blk.address; | 667 | pmtmr_ioport = fadt->xpm_tmr_blk.address; |
668 | } else { | 668 | } else { |
669 | /* FADT rev. 1 */ | 669 | /* FADT rev. 1 */ |
670 | pmtmr_ioport = fadt->V1_pm_tmr_blk; | 670 | pmtmr_ioport = fadt->V1_pm_tmr_blk; |
671 | } | 671 | } |
672 | if (pmtmr_ioport) | 672 | if (pmtmr_ioport) |
673 | printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", pmtmr_ioport); | 673 | printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", pmtmr_ioport); |
674 | #endif | 674 | #endif |
675 | return 0; | 675 | return 0; |
676 | } | 676 | } |
677 | 677 | ||
678 | 678 | ||
679 | unsigned long __init | 679 | unsigned long __init |
680 | acpi_find_rsdp (void) | 680 | acpi_find_rsdp (void) |
681 | { | 681 | { |
682 | unsigned long rsdp_phys = 0; | 682 | unsigned long rsdp_phys = 0; |
683 | 683 | ||
684 | if (efi_enabled) { | 684 | if (efi_enabled) { |
685 | if (efi.acpi20) | 685 | if (efi.acpi20) |
686 | return __pa(efi.acpi20); | 686 | return __pa(efi.acpi20); |
687 | else if (efi.acpi) | 687 | else if (efi.acpi) |
688 | return __pa(efi.acpi); | 688 | return __pa(efi.acpi); |
689 | } | 689 | } |
690 | /* | 690 | /* |
691 | * Scan memory looking for the RSDP signature. First search EBDA (low | 691 | * Scan memory looking for the RSDP signature. First search EBDA (low |
692 | * memory) paragraphs and then search upper memory (E0000-FFFFF). | 692 | * memory) paragraphs and then search upper memory (E0000-FFFFF). |
693 | */ | 693 | */ |
694 | rsdp_phys = acpi_scan_rsdp (0, 0x400); | 694 | rsdp_phys = acpi_scan_rsdp (0, 0x400); |
695 | if (!rsdp_phys) | 695 | if (!rsdp_phys) |
696 | rsdp_phys = acpi_scan_rsdp (0xE0000, 0x20000); | 696 | rsdp_phys = acpi_scan_rsdp (0xE0000, 0x20000); |
697 | 697 | ||
698 | return rsdp_phys; | 698 | return rsdp_phys; |
699 | } | 699 | } |
700 | 700 | ||
701 | #ifdef CONFIG_X86_LOCAL_APIC | 701 | #ifdef CONFIG_X86_LOCAL_APIC |
702 | /* | 702 | /* |
703 | * Parse LAPIC entries in MADT | 703 | * Parse LAPIC entries in MADT |
704 | * returns 0 on success, < 0 on error | 704 | * returns 0 on success, < 0 on error |
705 | */ | 705 | */ |
706 | static int __init | 706 | static int __init |
707 | acpi_parse_madt_lapic_entries(void) | 707 | acpi_parse_madt_lapic_entries(void) |
708 | { | 708 | { |
709 | int count; | 709 | int count; |
710 | 710 | ||
711 | /* | 711 | /* |
712 | * Note that the LAPIC address is obtained from the MADT (32-bit value) | 712 | * Note that the LAPIC address is obtained from the MADT (32-bit value) |
713 | * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). | 713 | * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). |
714 | */ | 714 | */ |
715 | 715 | ||
716 | count = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0); | 716 | count = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0); |
717 | if (count < 0) { | 717 | if (count < 0) { |
718 | printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); | 718 | printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); |
719 | return count; | 719 | return count; |
720 | } | 720 | } |
721 | 721 | ||
722 | mp_register_lapic_address(acpi_lapic_addr); | 722 | mp_register_lapic_address(acpi_lapic_addr); |
723 | 723 | ||
724 | count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic, | 724 | count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic, |
725 | MAX_APICS); | 725 | MAX_APICS); |
726 | if (!count) { | 726 | if (!count) { |
727 | printk(KERN_ERR PREFIX "No LAPIC entries present\n"); | 727 | printk(KERN_ERR PREFIX "No LAPIC entries present\n"); |
728 | /* TBD: Cleanup to allow fallback to MPS */ | 728 | /* TBD: Cleanup to allow fallback to MPS */ |
729 | return -ENODEV; | 729 | return -ENODEV; |
730 | } | 730 | } |
731 | else if (count < 0) { | 731 | else if (count < 0) { |
732 | printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); | 732 | printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n"); |
733 | /* TBD: Cleanup to allow fallback to MPS */ | 733 | /* TBD: Cleanup to allow fallback to MPS */ |
734 | return count; | 734 | return count; |
735 | } | 735 | } |
736 | 736 | ||
737 | count = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0); | 737 | count = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0); |
738 | if (count < 0) { | 738 | if (count < 0) { |
739 | printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); | 739 | printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); |
740 | /* TBD: Cleanup to allow fallback to MPS */ | 740 | /* TBD: Cleanup to allow fallback to MPS */ |
741 | return count; | 741 | return count; |
742 | } | 742 | } |
743 | return 0; | 743 | return 0; |
744 | } | 744 | } |
745 | #endif /* CONFIG_X86_LOCAL_APIC */ | 745 | #endif /* CONFIG_X86_LOCAL_APIC */ |
746 | 746 | ||
747 | #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER) | 747 | #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER) |
748 | /* | 748 | /* |
749 | * Parse IOAPIC related entries in MADT | 749 | * Parse IOAPIC related entries in MADT |
750 | * returns 0 on success, < 0 on error | 750 | * returns 0 on success, < 0 on error |
751 | */ | 751 | */ |
752 | static int __init | 752 | static int __init |
753 | acpi_parse_madt_ioapic_entries(void) | 753 | acpi_parse_madt_ioapic_entries(void) |
754 | { | 754 | { |
755 | int count; | 755 | int count; |
756 | 756 | ||
757 | /* | 757 | /* |
758 | * ACPI interpreter is required to complete interrupt setup, | 758 | * ACPI interpreter is required to complete interrupt setup, |
759 | * so if it is off, don't enumerate the io-apics with ACPI. | 759 | * so if it is off, don't enumerate the io-apics with ACPI. |
760 | * If MPS is present, it will handle them, | 760 | * If MPS is present, it will handle them, |
761 | * otherwise the system will stay in PIC mode | 761 | * otherwise the system will stay in PIC mode |
762 | */ | 762 | */ |
763 | if (acpi_disabled || acpi_noirq) { | 763 | if (acpi_disabled || acpi_noirq) { |
764 | return -ENODEV; | 764 | return -ENODEV; |
765 | } | 765 | } |
766 | 766 | ||
767 | /* | 767 | /* |
768 | * if "noapic" boot option, don't look for IO-APICs | 768 | * if "noapic" boot option, don't look for IO-APICs |
769 | */ | 769 | */ |
770 | if (skip_ioapic_setup) { | 770 | if (skip_ioapic_setup) { |
771 | printk(KERN_INFO PREFIX "Skipping IOAPIC probe " | 771 | printk(KERN_INFO PREFIX "Skipping IOAPIC probe " |
772 | "due to 'noapic' option.\n"); | 772 | "due to 'noapic' option.\n"); |
773 | return -ENODEV; | 773 | return -ENODEV; |
774 | } | 774 | } |
775 | 775 | ||
776 | count = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic, MAX_IO_APICS); | 776 | count = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic, MAX_IO_APICS); |
777 | if (!count) { | 777 | if (!count) { |
778 | printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); | 778 | printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); |
779 | return -ENODEV; | 779 | return -ENODEV; |
780 | } | 780 | } |
781 | else if (count < 0) { | 781 | else if (count < 0) { |
782 | printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); | 782 | printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n"); |
783 | return count; | 783 | return count; |
784 | } | 784 | } |
785 | 785 | ||
786 | count = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, NR_IRQ_VECTORS); | 786 | count = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, NR_IRQ_VECTORS); |
787 | if (count < 0) { | 787 | if (count < 0) { |
788 | printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n"); | 788 | printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n"); |
789 | /* TBD: Cleanup to allow fallback to MPS */ | 789 | /* TBD: Cleanup to allow fallback to MPS */ |
790 | return count; | 790 | return count; |
791 | } | 791 | } |
792 | 792 | ||
793 | /* | 793 | /* |
794 | * If BIOS did not supply an INT_SRC_OVR for the SCI | 794 | * If BIOS did not supply an INT_SRC_OVR for the SCI |
795 | * pretend we got one so we can set the SCI flags. | 795 | * pretend we got one so we can set the SCI flags. |
796 | */ | 796 | */ |
797 | if (!acpi_sci_override_gsi) | 797 | if (!acpi_sci_override_gsi) |
798 | acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0); | 798 | acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0); |
799 | 799 | ||
800 | /* Fill in identity legacy mapings where no override */ | 800 | /* Fill in identity legacy mapings where no override */ |
801 | mp_config_acpi_legacy_irqs(); | 801 | mp_config_acpi_legacy_irqs(); |
802 | 802 | ||
803 | count = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, NR_IRQ_VECTORS); | 803 | count = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, NR_IRQ_VECTORS); |
804 | if (count < 0) { | 804 | if (count < 0) { |
805 | printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); | 805 | printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); |
806 | /* TBD: Cleanup to allow fallback to MPS */ | 806 | /* TBD: Cleanup to allow fallback to MPS */ |
807 | return count; | 807 | return count; |
808 | } | 808 | } |
809 | 809 | ||
810 | return 0; | 810 | return 0; |
811 | } | 811 | } |
812 | #else | 812 | #else |
813 | static inline int acpi_parse_madt_ioapic_entries(void) | 813 | static inline int acpi_parse_madt_ioapic_entries(void) |
814 | { | 814 | { |
815 | return -1; | 815 | return -1; |
816 | } | 816 | } |
817 | #endif /* !(CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER) */ | 817 | #endif /* !(CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER) */ |
818 | 818 | ||
819 | 819 | ||
820 | static void __init | 820 | static void __init |
821 | acpi_process_madt(void) | 821 | acpi_process_madt(void) |
822 | { | 822 | { |
823 | #ifdef CONFIG_X86_LOCAL_APIC | 823 | #ifdef CONFIG_X86_LOCAL_APIC |
824 | int count, error; | 824 | int count, error; |
825 | 825 | ||
826 | count = acpi_table_parse(ACPI_APIC, acpi_parse_madt); | 826 | count = acpi_table_parse(ACPI_APIC, acpi_parse_madt); |
827 | if (count >= 1) { | 827 | if (count >= 1) { |
828 | 828 | ||
829 | /* | 829 | /* |
830 | * Parse MADT LAPIC entries | 830 | * Parse MADT LAPIC entries |
831 | */ | 831 | */ |
832 | error = acpi_parse_madt_lapic_entries(); | 832 | error = acpi_parse_madt_lapic_entries(); |
833 | if (!error) { | 833 | if (!error) { |
834 | acpi_lapic = 1; | 834 | acpi_lapic = 1; |
835 | 835 | ||
836 | #ifdef CONFIG_X86_GENERICARCH | ||
837 | generic_bigsmp_probe(); | ||
838 | #endif | ||
836 | /* | 839 | /* |
837 | * Parse MADT IO-APIC entries | 840 | * Parse MADT IO-APIC entries |
838 | */ | 841 | */ |
839 | error = acpi_parse_madt_ioapic_entries(); | 842 | error = acpi_parse_madt_ioapic_entries(); |
840 | if (!error) { | 843 | if (!error) { |
841 | acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; | 844 | acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; |
842 | acpi_irq_balance_set(NULL); | 845 | acpi_irq_balance_set(NULL); |
843 | acpi_ioapic = 1; | 846 | acpi_ioapic = 1; |
844 | 847 | ||
845 | smp_found_config = 1; | 848 | smp_found_config = 1; |
846 | clustered_apic_check(); | 849 | clustered_apic_check(); |
847 | } | 850 | } |
848 | } | 851 | } |
849 | if (error == -EINVAL) { | 852 | if (error == -EINVAL) { |
850 | /* | 853 | /* |
851 | * Dell Precision Workstation 410, 610 come here. | 854 | * Dell Precision Workstation 410, 610 come here. |
852 | */ | 855 | */ |
853 | printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n"); | 856 | printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n"); |
854 | disable_acpi(); | 857 | disable_acpi(); |
855 | } | 858 | } |
856 | } | 859 | } |
857 | #endif | 860 | #endif |
858 | return; | 861 | return; |
859 | } | 862 | } |
860 | 863 | ||
861 | extern int acpi_force; | 864 | extern int acpi_force; |
862 | 865 | ||
863 | #ifdef __i386__ | 866 | #ifdef __i386__ |
864 | 867 | ||
865 | #ifdef CONFIG_ACPI_PCI | 868 | #ifdef CONFIG_ACPI_PCI |
866 | static int __init disable_acpi_irq(struct dmi_system_id *d) | 869 | static int __init disable_acpi_irq(struct dmi_system_id *d) |
867 | { | 870 | { |
868 | if (!acpi_force) { | 871 | if (!acpi_force) { |
869 | printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n", | 872 | printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n", |
870 | d->ident); | 873 | d->ident); |
871 | acpi_noirq_set(); | 874 | acpi_noirq_set(); |
872 | } | 875 | } |
873 | return 0; | 876 | return 0; |
874 | } | 877 | } |
875 | 878 | ||
876 | static int __init disable_acpi_pci(struct dmi_system_id *d) | 879 | static int __init disable_acpi_pci(struct dmi_system_id *d) |
877 | { | 880 | { |
878 | if (!acpi_force) { | 881 | if (!acpi_force) { |
879 | printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n", | 882 | printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n", |
880 | d->ident); | 883 | d->ident); |
881 | acpi_disable_pci(); | 884 | acpi_disable_pci(); |
882 | } | 885 | } |
883 | return 0; | 886 | return 0; |
884 | } | 887 | } |
885 | #endif | 888 | #endif |
886 | 889 | ||
887 | static int __init dmi_disable_acpi(struct dmi_system_id *d) | 890 | static int __init dmi_disable_acpi(struct dmi_system_id *d) |
888 | { | 891 | { |
889 | if (!acpi_force) { | 892 | if (!acpi_force) { |
890 | printk(KERN_NOTICE "%s detected: acpi off\n",d->ident); | 893 | printk(KERN_NOTICE "%s detected: acpi off\n",d->ident); |
891 | disable_acpi(); | 894 | disable_acpi(); |
892 | } else { | 895 | } else { |
893 | printk(KERN_NOTICE | 896 | printk(KERN_NOTICE |
894 | "Warning: DMI blacklist says broken, but acpi forced\n"); | 897 | "Warning: DMI blacklist says broken, but acpi forced\n"); |
895 | } | 898 | } |
896 | return 0; | 899 | return 0; |
897 | } | 900 | } |
898 | 901 | ||
899 | /* | 902 | /* |
900 | * Limit ACPI to CPU enumeration for HT | 903 | * Limit ACPI to CPU enumeration for HT |
901 | */ | 904 | */ |
902 | static int __init force_acpi_ht(struct dmi_system_id *d) | 905 | static int __init force_acpi_ht(struct dmi_system_id *d) |
903 | { | 906 | { |
904 | if (!acpi_force) { | 907 | if (!acpi_force) { |
905 | printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", d->ident); | 908 | printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", d->ident); |
906 | disable_acpi(); | 909 | disable_acpi(); |
907 | acpi_ht = 1; | 910 | acpi_ht = 1; |
908 | } else { | 911 | } else { |
909 | printk(KERN_NOTICE | 912 | printk(KERN_NOTICE |
910 | "Warning: acpi=force overrules DMI blacklist: acpi=ht\n"); | 913 | "Warning: acpi=force overrules DMI blacklist: acpi=ht\n"); |
911 | } | 914 | } |
912 | return 0; | 915 | return 0; |
913 | } | 916 | } |
914 | 917 | ||
915 | /* | 918 | /* |
916 | * If your system is blacklisted here, but you find that acpi=force | 919 | * If your system is blacklisted here, but you find that acpi=force |
917 | * works for you, please contact acpi-devel@sourceforge.net | 920 | * works for you, please contact acpi-devel@sourceforge.net |
918 | */ | 921 | */ |
919 | static struct dmi_system_id __initdata acpi_dmi_table[] = { | 922 | static struct dmi_system_id __initdata acpi_dmi_table[] = { |
920 | /* | 923 | /* |
921 | * Boxes that need ACPI disabled | 924 | * Boxes that need ACPI disabled |
922 | */ | 925 | */ |
923 | { | 926 | { |
924 | .callback = dmi_disable_acpi, | 927 | .callback = dmi_disable_acpi, |
925 | .ident = "IBM Thinkpad", | 928 | .ident = "IBM Thinkpad", |
926 | .matches = { | 929 | .matches = { |
927 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), | 930 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), |
928 | DMI_MATCH(DMI_BOARD_NAME, "2629H1G"), | 931 | DMI_MATCH(DMI_BOARD_NAME, "2629H1G"), |
929 | }, | 932 | }, |
930 | }, | 933 | }, |
931 | 934 | ||
932 | /* | 935 | /* |
933 | * Boxes that need acpi=ht | 936 | * Boxes that need acpi=ht |
934 | */ | 937 | */ |
935 | { | 938 | { |
936 | .callback = force_acpi_ht, | 939 | .callback = force_acpi_ht, |
937 | .ident = "FSC Primergy T850", | 940 | .ident = "FSC Primergy T850", |
938 | .matches = { | 941 | .matches = { |
939 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), | 942 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), |
940 | DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"), | 943 | DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"), |
941 | }, | 944 | }, |
942 | }, | 945 | }, |
943 | { | 946 | { |
944 | .callback = force_acpi_ht, | 947 | .callback = force_acpi_ht, |
945 | .ident = "DELL GX240", | 948 | .ident = "DELL GX240", |
946 | .matches = { | 949 | .matches = { |
947 | DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"), | 950 | DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"), |
948 | DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"), | 951 | DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"), |
949 | }, | 952 | }, |
950 | }, | 953 | }, |
951 | { | 954 | { |
952 | .callback = force_acpi_ht, | 955 | .callback = force_acpi_ht, |
953 | .ident = "HP VISUALIZE NT Workstation", | 956 | .ident = "HP VISUALIZE NT Workstation", |
954 | .matches = { | 957 | .matches = { |
955 | DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), | 958 | DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), |
956 | DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"), | 959 | DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"), |
957 | }, | 960 | }, |
958 | }, | 961 | }, |
959 | { | 962 | { |
960 | .callback = force_acpi_ht, | 963 | .callback = force_acpi_ht, |
961 | .ident = "Compaq Workstation W8000", | 964 | .ident = "Compaq Workstation W8000", |
962 | .matches = { | 965 | .matches = { |
963 | DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), | 966 | DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), |
964 | DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"), | 967 | DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"), |
965 | }, | 968 | }, |
966 | }, | 969 | }, |
967 | { | 970 | { |
968 | .callback = force_acpi_ht, | 971 | .callback = force_acpi_ht, |
969 | .ident = "ASUS P4B266", | 972 | .ident = "ASUS P4B266", |
970 | .matches = { | 973 | .matches = { |
971 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | 974 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), |
972 | DMI_MATCH(DMI_BOARD_NAME, "P4B266"), | 975 | DMI_MATCH(DMI_BOARD_NAME, "P4B266"), |
973 | }, | 976 | }, |
974 | }, | 977 | }, |
975 | { | 978 | { |
976 | .callback = force_acpi_ht, | 979 | .callback = force_acpi_ht, |
977 | .ident = "ASUS P2B-DS", | 980 | .ident = "ASUS P2B-DS", |
978 | .matches = { | 981 | .matches = { |
979 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | 982 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), |
980 | DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"), | 983 | DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"), |
981 | }, | 984 | }, |
982 | }, | 985 | }, |
983 | { | 986 | { |
984 | .callback = force_acpi_ht, | 987 | .callback = force_acpi_ht, |
985 | .ident = "ASUS CUR-DLS", | 988 | .ident = "ASUS CUR-DLS", |
986 | .matches = { | 989 | .matches = { |
987 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | 990 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), |
988 | DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"), | 991 | DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"), |
989 | }, | 992 | }, |
990 | }, | 993 | }, |
991 | { | 994 | { |
992 | .callback = force_acpi_ht, | 995 | .callback = force_acpi_ht, |
993 | .ident = "ABIT i440BX-W83977", | 996 | .ident = "ABIT i440BX-W83977", |
994 | .matches = { | 997 | .matches = { |
995 | DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"), | 998 | DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"), |
996 | DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"), | 999 | DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"), |
997 | }, | 1000 | }, |
998 | }, | 1001 | }, |
999 | { | 1002 | { |
1000 | .callback = force_acpi_ht, | 1003 | .callback = force_acpi_ht, |
1001 | .ident = "IBM Bladecenter", | 1004 | .ident = "IBM Bladecenter", |
1002 | .matches = { | 1005 | .matches = { |
1003 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), | 1006 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), |
1004 | DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"), | 1007 | DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"), |
1005 | }, | 1008 | }, |
1006 | }, | 1009 | }, |
1007 | { | 1010 | { |
1008 | .callback = force_acpi_ht, | 1011 | .callback = force_acpi_ht, |
1009 | .ident = "IBM eServer xSeries 360", | 1012 | .ident = "IBM eServer xSeries 360", |
1010 | .matches = { | 1013 | .matches = { |
1011 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), | 1014 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), |
1012 | DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"), | 1015 | DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"), |
1013 | }, | 1016 | }, |
1014 | }, | 1017 | }, |
1015 | { | 1018 | { |
1016 | .callback = force_acpi_ht, | 1019 | .callback = force_acpi_ht, |
1017 | .ident = "IBM eserver xSeries 330", | 1020 | .ident = "IBM eserver xSeries 330", |
1018 | .matches = { | 1021 | .matches = { |
1019 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), | 1022 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), |
1020 | DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"), | 1023 | DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"), |
1021 | }, | 1024 | }, |
1022 | }, | 1025 | }, |
1023 | { | 1026 | { |
1024 | .callback = force_acpi_ht, | 1027 | .callback = force_acpi_ht, |
1025 | .ident = "IBM eserver xSeries 440", | 1028 | .ident = "IBM eserver xSeries 440", |
1026 | .matches = { | 1029 | .matches = { |
1027 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), | 1030 | DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), |
1028 | DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"), | 1031 | DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"), |
1029 | }, | 1032 | }, |
1030 | }, | 1033 | }, |
1031 | 1034 | ||
1032 | #ifdef CONFIG_ACPI_PCI | 1035 | #ifdef CONFIG_ACPI_PCI |
1033 | /* | 1036 | /* |
1034 | * Boxes that need ACPI PCI IRQ routing disabled | 1037 | * Boxes that need ACPI PCI IRQ routing disabled |
1035 | */ | 1038 | */ |
1036 | { | 1039 | { |
1037 | .callback = disable_acpi_irq, | 1040 | .callback = disable_acpi_irq, |
1038 | .ident = "ASUS A7V", | 1041 | .ident = "ASUS A7V", |
1039 | .matches = { | 1042 | .matches = { |
1040 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"), | 1043 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"), |
1041 | DMI_MATCH(DMI_BOARD_NAME, "<A7V>"), | 1044 | DMI_MATCH(DMI_BOARD_NAME, "<A7V>"), |
1042 | /* newer BIOS, Revision 1011, does work */ | 1045 | /* newer BIOS, Revision 1011, does work */ |
1043 | DMI_MATCH(DMI_BIOS_VERSION, "ASUS A7V ACPI BIOS Revision 1007"), | 1046 | DMI_MATCH(DMI_BIOS_VERSION, "ASUS A7V ACPI BIOS Revision 1007"), |
1044 | }, | 1047 | }, |
1045 | }, | 1048 | }, |
1046 | 1049 | ||
1047 | /* | 1050 | /* |
1048 | * Boxes that need ACPI PCI IRQ routing and PCI scan disabled | 1051 | * Boxes that need ACPI PCI IRQ routing and PCI scan disabled |
1049 | */ | 1052 | */ |
1050 | { /* _BBN 0 bug */ | 1053 | { /* _BBN 0 bug */ |
1051 | .callback = disable_acpi_pci, | 1054 | .callback = disable_acpi_pci, |
1052 | .ident = "ASUS PR-DLS", | 1055 | .ident = "ASUS PR-DLS", |
1053 | .matches = { | 1056 | .matches = { |
1054 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | 1057 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), |
1055 | DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"), | 1058 | DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"), |
1056 | DMI_MATCH(DMI_BIOS_VERSION, "ASUS PR-DLS ACPI BIOS Revision 1010"), | 1059 | DMI_MATCH(DMI_BIOS_VERSION, "ASUS PR-DLS ACPI BIOS Revision 1010"), |
1057 | DMI_MATCH(DMI_BIOS_DATE, "03/21/2003") | 1060 | DMI_MATCH(DMI_BIOS_DATE, "03/21/2003") |
1058 | }, | 1061 | }, |
1059 | }, | 1062 | }, |
1060 | { | 1063 | { |
1061 | .callback = disable_acpi_pci, | 1064 | .callback = disable_acpi_pci, |
1062 | .ident = "Acer TravelMate 36x Laptop", | 1065 | .ident = "Acer TravelMate 36x Laptop", |
1063 | .matches = { | 1066 | .matches = { |
1064 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | 1067 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
1065 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), | 1068 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), |
1066 | }, | 1069 | }, |
1067 | }, | 1070 | }, |
1068 | #endif | 1071 | #endif |
1069 | { } | 1072 | { } |
1070 | }; | 1073 | }; |
1071 | 1074 | ||
1072 | #endif /* __i386__ */ | 1075 | #endif /* __i386__ */ |
1073 | 1076 | ||
1074 | /* | 1077 | /* |
1075 | * acpi_boot_table_init() and acpi_boot_init() | 1078 | * acpi_boot_table_init() and acpi_boot_init() |
1076 | * called from setup_arch(), always. | 1079 | * called from setup_arch(), always. |
1077 | * 1. checksums all tables | 1080 | * 1. checksums all tables |
1078 | * 2. enumerates lapics | 1081 | * 2. enumerates lapics |
1079 | * 3. enumerates io-apics | 1082 | * 3. enumerates io-apics |
1080 | * | 1083 | * |
1081 | * acpi_table_init() is separate to allow reading SRAT without | 1084 | * acpi_table_init() is separate to allow reading SRAT without |
1082 | * other side effects. | 1085 | * other side effects. |
1083 | * | 1086 | * |
1084 | * side effects of acpi_boot_init: | 1087 | * side effects of acpi_boot_init: |
1085 | * acpi_lapic = 1 if LAPIC found | 1088 | * acpi_lapic = 1 if LAPIC found |
1086 | * acpi_ioapic = 1 if IOAPIC found | 1089 | * acpi_ioapic = 1 if IOAPIC found |
1087 | * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; | 1090 | * if (acpi_lapic && acpi_ioapic) smp_found_config = 1; |
1088 | * if acpi_blacklisted() acpi_disabled = 1; | 1091 | * if acpi_blacklisted() acpi_disabled = 1; |
1089 | * acpi_irq_model=... | 1092 | * acpi_irq_model=... |
1090 | * ... | 1093 | * ... |
1091 | * | 1094 | * |
1092 | * return value: (currently ignored) | 1095 | * return value: (currently ignored) |
1093 | * 0: success | 1096 | * 0: success |
1094 | * !0: failure | 1097 | * !0: failure |
1095 | */ | 1098 | */ |
1096 | 1099 | ||
1097 | int __init | 1100 | int __init |
1098 | acpi_boot_table_init(void) | 1101 | acpi_boot_table_init(void) |
1099 | { | 1102 | { |
1100 | int error; | 1103 | int error; |
1101 | 1104 | ||
1102 | #ifdef __i386__ | 1105 | #ifdef __i386__ |
1103 | dmi_check_system(acpi_dmi_table); | 1106 | dmi_check_system(acpi_dmi_table); |
1104 | #endif | 1107 | #endif |
1105 | 1108 | ||
1106 | /* | 1109 | /* |
1107 | * If acpi_disabled, bail out | 1110 | * If acpi_disabled, bail out |
1108 | * One exception: acpi=ht continues far enough to enumerate LAPICs | 1111 | * One exception: acpi=ht continues far enough to enumerate LAPICs |
1109 | */ | 1112 | */ |
1110 | if (acpi_disabled && !acpi_ht) | 1113 | if (acpi_disabled && !acpi_ht) |
1111 | return 1; | 1114 | return 1; |
1112 | 1115 | ||
1113 | /* | 1116 | /* |
1114 | * Initialize the ACPI boot-time table parser. | 1117 | * Initialize the ACPI boot-time table parser. |
1115 | */ | 1118 | */ |
1116 | error = acpi_table_init(); | 1119 | error = acpi_table_init(); |
1117 | if (error) { | 1120 | if (error) { |
1118 | disable_acpi(); | 1121 | disable_acpi(); |
1119 | return error; | 1122 | return error; |
1120 | } | 1123 | } |
1121 | 1124 | ||
1122 | #ifdef __i386__ | 1125 | #ifdef __i386__ |
1123 | check_acpi_pci(); | 1126 | check_acpi_pci(); |
1124 | #endif | 1127 | #endif |
1125 | 1128 | ||
1126 | acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); | 1129 | acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); |
1127 | 1130 | ||
1128 | /* | 1131 | /* |
1129 | * blacklist may disable ACPI entirely | 1132 | * blacklist may disable ACPI entirely |
1130 | */ | 1133 | */ |
1131 | error = acpi_blacklisted(); | 1134 | error = acpi_blacklisted(); |
1132 | if (error) { | 1135 | if (error) { |
1133 | if (acpi_force) { | 1136 | if (acpi_force) { |
1134 | printk(KERN_WARNING PREFIX "acpi=force override\n"); | 1137 | printk(KERN_WARNING PREFIX "acpi=force override\n"); |
1135 | } else { | 1138 | } else { |
1136 | printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); | 1139 | printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); |
1137 | disable_acpi(); | 1140 | disable_acpi(); |
1138 | return error; | 1141 | return error; |
1139 | } | 1142 | } |
1140 | } | 1143 | } |
1141 | 1144 | ||
1142 | return 0; | 1145 | return 0; |
1143 | } | 1146 | } |
1144 | 1147 | ||
1145 | 1148 | ||
1146 | int __init acpi_boot_init(void) | 1149 | int __init acpi_boot_init(void) |
1147 | { | 1150 | { |
1148 | /* | 1151 | /* |
1149 | * If acpi_disabled, bail out | 1152 | * If acpi_disabled, bail out |
1150 | * One exception: acpi=ht continues far enough to enumerate LAPICs | 1153 | * One exception: acpi=ht continues far enough to enumerate LAPICs |
1151 | */ | 1154 | */ |
1152 | if (acpi_disabled && !acpi_ht) | 1155 | if (acpi_disabled && !acpi_ht) |
1153 | return 1; | 1156 | return 1; |
1154 | 1157 | ||
1155 | acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); | 1158 | acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); |
1156 | 1159 | ||
1157 | /* | 1160 | /* |
1158 | * set sci_int and PM timer address | 1161 | * set sci_int and PM timer address |
1159 | */ | 1162 | */ |
1160 | acpi_table_parse(ACPI_FADT, acpi_parse_fadt); | 1163 | acpi_table_parse(ACPI_FADT, acpi_parse_fadt); |
1161 | 1164 | ||
1162 | /* | 1165 | /* |
1163 | * Process the Multiple APIC Description Table (MADT), if present | 1166 | * Process the Multiple APIC Description Table (MADT), if present |
1164 | */ | 1167 | */ |
1165 | acpi_process_madt(); | 1168 | acpi_process_madt(); |
1166 | 1169 | ||
1167 | acpi_table_parse(ACPI_HPET, acpi_parse_hpet); | 1170 | acpi_table_parse(ACPI_HPET, acpi_parse_hpet); |
1168 | 1171 | ||
1169 | return 0; | 1172 | return 0; |
1170 | } | 1173 | } |
1171 | 1174 | ||
1172 | 1175 |
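
The i386-only block above ends with acpi_dmi_table, a DMI blacklist that acpi_boot_table_init() hands to dmi_check_system() before any ACPI table is parsed: when every DMI_MATCH() criterion of an entry matches the firmware's DMI strings, that entry's .callback (dmi_disable_acpi, force_acpi_ht or disable_acpi_pci) is invoked. As a minimal sketch of the pattern only, the entry below shows how a further quirk would be written; "Example Corp" and "EX-1000" are invented identifiers, not a real blacklisted board, and a real entry would simply be appended to acpi_dmi_table ahead of its empty { } terminator.

	{
		.callback = dmi_disable_acpi,	/* or force_acpi_ht / disable_acpi_pci */
		.ident = "Example Corp EX-1000",	/* hypothetical machine, illustration only */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Example Corp"),
			DMI_MATCH(DMI_BOARD_NAME, "EX-1000"),
		},
	},
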
arch/i386/kernel/mpparse.c
1 | /* | 1 | /* |
2 | * Intel Multiprocessor Specification 1.1 and 1.4 | 2 | * Intel Multiprocessor Specification 1.1 and 1.4 |
3 | * compliant MP-table parsing routines. | 3 | * compliant MP-table parsing routines. |
4 | * | 4 | * |
5 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | 5 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> |
6 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> | 6 | * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com> |
7 | * | 7 | * |
8 | * Fixes | 8 | * Fixes |
9 | * Erich Boleyn : MP v1.4 and additional changes. | 9 | * Erich Boleyn : MP v1.4 and additional changes. |
10 | * Alan Cox : Added EBDA scanning | 10 | * Alan Cox : Added EBDA scanning |
11 | * Ingo Molnar : various cleanups and rewrites | 11 | * Ingo Molnar : various cleanups and rewrites |
12 | * Maciej W. Rozycki: Bits for default MP configurations | 12 | * Maciej W. Rozycki: Bits for default MP configurations |
13 | * Paul Diefenbaugh: Added full ACPI support | 13 | * Paul Diefenbaugh: Added full ACPI support |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/acpi.h> | 19 | #include <linux/acpi.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/config.h> | 21 | #include <linux/config.h> |
22 | #include <linux/bootmem.h> | 22 | #include <linux/bootmem.h> |
23 | #include <linux/smp_lock.h> | 23 | #include <linux/smp_lock.h> |
24 | #include <linux/kernel_stat.h> | 24 | #include <linux/kernel_stat.h> |
25 | #include <linux/mc146818rtc.h> | 25 | #include <linux/mc146818rtc.h> |
26 | #include <linux/bitops.h> | 26 | #include <linux/bitops.h> |
27 | 27 | ||
28 | #include <asm/smp.h> | 28 | #include <asm/smp.h> |
29 | #include <asm/acpi.h> | 29 | #include <asm/acpi.h> |
30 | #include <asm/mtrr.h> | 30 | #include <asm/mtrr.h> |
31 | #include <asm/mpspec.h> | 31 | #include <asm/mpspec.h> |
32 | #include <asm/io_apic.h> | 32 | #include <asm/io_apic.h> |
33 | 33 | ||
34 | #include <mach_apic.h> | 34 | #include <mach_apic.h> |
35 | #include <mach_mpparse.h> | 35 | #include <mach_mpparse.h> |
36 | #include <bios_ebda.h> | 36 | #include <bios_ebda.h> |
37 | 37 | ||
38 | /* Have we found an MP table */ | 38 | /* Have we found an MP table */ |
39 | int smp_found_config; | 39 | int smp_found_config; |
40 | unsigned int __initdata maxcpus = NR_CPUS; | 40 | unsigned int __initdata maxcpus = NR_CPUS; |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Various Linux-internal data structures created from the | 43 | * Various Linux-internal data structures created from the |
44 | * MP-table. | 44 | * MP-table. |
45 | */ | 45 | */ |
46 | int apic_version [MAX_APICS]; | 46 | int apic_version [MAX_APICS]; |
47 | int mp_bus_id_to_type [MAX_MP_BUSSES]; | 47 | int mp_bus_id_to_type [MAX_MP_BUSSES]; |
48 | int mp_bus_id_to_node [MAX_MP_BUSSES]; | 48 | int mp_bus_id_to_node [MAX_MP_BUSSES]; |
49 | int mp_bus_id_to_local [MAX_MP_BUSSES]; | 49 | int mp_bus_id_to_local [MAX_MP_BUSSES]; |
50 | int quad_local_to_mp_bus_id [NR_CPUS/4][4]; | 50 | int quad_local_to_mp_bus_id [NR_CPUS/4][4]; |
51 | int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; | 51 | int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; |
52 | static int mp_current_pci_id; | 52 | static int mp_current_pci_id; |
53 | 53 | ||
54 | /* I/O APIC entries */ | 54 | /* I/O APIC entries */ |
55 | struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | 55 | struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; |
56 | 56 | ||
57 | /* # of MP IRQ source entries */ | 57 | /* # of MP IRQ source entries */ |
58 | struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | 58 | struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; |
59 | 59 | ||
60 | /* MP IRQ source entries */ | 60 | /* MP IRQ source entries */ |
61 | int mp_irq_entries; | 61 | int mp_irq_entries; |
62 | 62 | ||
63 | int nr_ioapics; | 63 | int nr_ioapics; |
64 | 64 | ||
65 | int pic_mode; | 65 | int pic_mode; |
66 | unsigned long mp_lapic_addr; | 66 | unsigned long mp_lapic_addr; |
67 | 67 | ||
68 | unsigned int def_to_bigsmp = 0; | ||
69 | |||
68 | /* Processor that is doing the boot up */ | 70 | /* Processor that is doing the boot up */ |
69 | unsigned int boot_cpu_physical_apicid = -1U; | 71 | unsigned int boot_cpu_physical_apicid = -1U; |
70 | /* Internal processor count */ | 72 | /* Internal processor count */ |
71 | static unsigned int __initdata num_processors; | 73 | static unsigned int __initdata num_processors; |
72 | 74 | ||
73 | /* Bitmask of physically existing CPUs */ | 75 | /* Bitmask of physically existing CPUs */ |
74 | physid_mask_t phys_cpu_present_map; | 76 | physid_mask_t phys_cpu_present_map; |
75 | 77 | ||
76 | u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; | 78 | u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; |
77 | 79 | ||
78 | /* | 80 | /* |
79 | * Intel MP BIOS table parsing routines: | 81 | * Intel MP BIOS table parsing routines: |
80 | */ | 82 | */ |
81 | 83 | ||
82 | 84 | ||
83 | /* | 85 | /* |
84 | * Checksum an MP configuration block. | 86 | * Checksum an MP configuration block. |
85 | */ | 87 | */ |
86 | 88 | ||
87 | static int __init mpf_checksum(unsigned char *mp, int len) | 89 | static int __init mpf_checksum(unsigned char *mp, int len) |
88 | { | 90 | { |
89 | int sum = 0; | 91 | int sum = 0; |
90 | 92 | ||
91 | while (len--) | 93 | while (len--) |
92 | sum += *mp++; | 94 | sum += *mp++; |
93 | 95 | ||
94 | return sum & 0xFF; | 96 | return sum & 0xFF; |
95 | } | 97 | } |
96 | 98 | ||
97 | /* | 99 | /* |
98 | * Have to match translation table entries to main table entries by counter | 100 | * Have to match translation table entries to main table entries by counter |
99 | * hence the mpc_record variable .... can't see a less disgusting way of | 101 | * hence the mpc_record variable .... can't see a less disgusting way of |
100 | * doing this .... | 102 | * doing this .... |
101 | */ | 103 | */ |
102 | 104 | ||
103 | static int mpc_record; | 105 | static int mpc_record; |
104 | static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata; | 106 | static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata; |
105 | 107 | ||
106 | #ifdef CONFIG_X86_NUMAQ | 108 | #ifdef CONFIG_X86_NUMAQ |
107 | static int MP_valid_apicid(int apicid, int version) | 109 | static int MP_valid_apicid(int apicid, int version) |
108 | { | 110 | { |
109 | return hweight_long(apicid & 0xf) == 1 && (apicid >> 4) != 0xf; | 111 | return hweight_long(apicid & 0xf) == 1 && (apicid >> 4) != 0xf; |
110 | } | 112 | } |
111 | #else | 113 | #else |
112 | static int MP_valid_apicid(int apicid, int version) | 114 | static int MP_valid_apicid(int apicid, int version) |
113 | { | 115 | { |
114 | if (version >= 0x14) | 116 | if (version >= 0x14) |
115 | return apicid < 0xff; | 117 | return apicid < 0xff; |
116 | else | 118 | else |
117 | return apicid < 0xf; | 119 | return apicid < 0xf; |
118 | } | 120 | } |
119 | #endif | 121 | #endif |
120 | 122 | ||
121 | static void __init MP_processor_info (struct mpc_config_processor *m) | 123 | static void __init MP_processor_info (struct mpc_config_processor *m) |
122 | { | 124 | { |
123 | int ver, apicid; | 125 | int ver, apicid; |
124 | physid_mask_t tmp; | 126 | physid_mask_t tmp; |
125 | 127 | ||
126 | if (!(m->mpc_cpuflag & CPU_ENABLED)) | 128 | if (!(m->mpc_cpuflag & CPU_ENABLED)) |
127 | return; | 129 | return; |
128 | 130 | ||
129 | apicid = mpc_apic_id(m, translation_table[mpc_record]); | 131 | apicid = mpc_apic_id(m, translation_table[mpc_record]); |
130 | 132 | ||
131 | if (m->mpc_featureflag&(1<<0)) | 133 | if (m->mpc_featureflag&(1<<0)) |
132 | Dprintk(" Floating point unit present.\n"); | 134 | Dprintk(" Floating point unit present.\n"); |
133 | if (m->mpc_featureflag&(1<<7)) | 135 | if (m->mpc_featureflag&(1<<7)) |
134 | Dprintk(" Machine Exception supported.\n"); | 136 | Dprintk(" Machine Exception supported.\n"); |
135 | if (m->mpc_featureflag&(1<<8)) | 137 | if (m->mpc_featureflag&(1<<8)) |
136 | Dprintk(" 64 bit compare & exchange supported.\n"); | 138 | Dprintk(" 64 bit compare & exchange supported.\n"); |
137 | if (m->mpc_featureflag&(1<<9)) | 139 | if (m->mpc_featureflag&(1<<9)) |
138 | Dprintk(" Internal APIC present.\n"); | 140 | Dprintk(" Internal APIC present.\n"); |
139 | if (m->mpc_featureflag&(1<<11)) | 141 | if (m->mpc_featureflag&(1<<11)) |
140 | Dprintk(" SEP present.\n"); | 142 | Dprintk(" SEP present.\n"); |
141 | if (m->mpc_featureflag&(1<<12)) | 143 | if (m->mpc_featureflag&(1<<12)) |
142 | Dprintk(" MTRR present.\n"); | 144 | Dprintk(" MTRR present.\n"); |
143 | if (m->mpc_featureflag&(1<<13)) | 145 | if (m->mpc_featureflag&(1<<13)) |
144 | Dprintk(" PGE present.\n"); | 146 | Dprintk(" PGE present.\n"); |
145 | if (m->mpc_featureflag&(1<<14)) | 147 | if (m->mpc_featureflag&(1<<14)) |
146 | Dprintk(" MCA present.\n"); | 148 | Dprintk(" MCA present.\n"); |
147 | if (m->mpc_featureflag&(1<<15)) | 149 | if (m->mpc_featureflag&(1<<15)) |
148 | Dprintk(" CMOV present.\n"); | 150 | Dprintk(" CMOV present.\n"); |
149 | if (m->mpc_featureflag&(1<<16)) | 151 | if (m->mpc_featureflag&(1<<16)) |
150 | Dprintk(" PAT present.\n"); | 152 | Dprintk(" PAT present.\n"); |
151 | if (m->mpc_featureflag&(1<<17)) | 153 | if (m->mpc_featureflag&(1<<17)) |
152 | Dprintk(" PSE present.\n"); | 154 | Dprintk(" PSE present.\n"); |
153 | if (m->mpc_featureflag&(1<<18)) | 155 | if (m->mpc_featureflag&(1<<18)) |
154 | Dprintk(" PSN present.\n"); | 156 | Dprintk(" PSN present.\n"); |
155 | if (m->mpc_featureflag&(1<<19)) | 157 | if (m->mpc_featureflag&(1<<19)) |
156 | Dprintk(" Cache Line Flush Instruction present.\n"); | 158 | Dprintk(" Cache Line Flush Instruction present.\n"); |
157 | /* 20 Reserved */ | 159 | /* 20 Reserved */ |
158 | if (m->mpc_featureflag&(1<<21)) | 160 | if (m->mpc_featureflag&(1<<21)) |
159 | Dprintk(" Debug Trace and EMON Store present.\n"); | 161 | Dprintk(" Debug Trace and EMON Store present.\n"); |
160 | if (m->mpc_featureflag&(1<<22)) | 162 | if (m->mpc_featureflag&(1<<22)) |
161 | Dprintk(" ACPI Thermal Throttle Registers present.\n"); | 163 | Dprintk(" ACPI Thermal Throttle Registers present.\n"); |
162 | if (m->mpc_featureflag&(1<<23)) | 164 | if (m->mpc_featureflag&(1<<23)) |
163 | Dprintk(" MMX present.\n"); | 165 | Dprintk(" MMX present.\n"); |
164 | if (m->mpc_featureflag&(1<<24)) | 166 | if (m->mpc_featureflag&(1<<24)) |
165 | Dprintk(" FXSR present.\n"); | 167 | Dprintk(" FXSR present.\n"); |
166 | if (m->mpc_featureflag&(1<<25)) | 168 | if (m->mpc_featureflag&(1<<25)) |
167 | Dprintk(" XMM present.\n"); | 169 | Dprintk(" XMM present.\n"); |
168 | if (m->mpc_featureflag&(1<<26)) | 170 | if (m->mpc_featureflag&(1<<26)) |
169 | Dprintk(" Willamette New Instructions present.\n"); | 171 | Dprintk(" Willamette New Instructions present.\n"); |
170 | if (m->mpc_featureflag&(1<<27)) | 172 | if (m->mpc_featureflag&(1<<27)) |
171 | Dprintk(" Self Snoop present.\n"); | 173 | Dprintk(" Self Snoop present.\n"); |
172 | if (m->mpc_featureflag&(1<<28)) | 174 | if (m->mpc_featureflag&(1<<28)) |
173 | Dprintk(" HT present.\n"); | 175 | Dprintk(" HT present.\n"); |
174 | if (m->mpc_featureflag&(1<<29)) | 176 | if (m->mpc_featureflag&(1<<29)) |
175 | Dprintk(" Thermal Monitor present.\n"); | 177 | Dprintk(" Thermal Monitor present.\n"); |
176 | /* 30, 31 Reserved */ | 178 | /* 30, 31 Reserved */ |
177 | 179 | ||
178 | 180 | ||
179 | if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { | 181 | if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { |
180 | Dprintk(" Bootup CPU\n"); | 182 | Dprintk(" Bootup CPU\n"); |
181 | boot_cpu_physical_apicid = m->mpc_apicid; | 183 | boot_cpu_physical_apicid = m->mpc_apicid; |
182 | } | 184 | } |
183 | 185 | ||
184 | if (num_processors >= NR_CPUS) { | 186 | if (num_processors >= NR_CPUS) { |
185 | printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." | 187 | printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached." |
186 | " Processor ignored.\n", NR_CPUS); | 188 | " Processor ignored.\n", NR_CPUS); |
187 | return; | 189 | return; |
188 | } | 190 | } |
189 | 191 | ||
190 | if (num_processors >= maxcpus) { | 192 | if (num_processors >= maxcpus) { |
191 | printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." | 193 | printk(KERN_WARNING "WARNING: maxcpus limit of %i reached." |
192 | " Processor ignored.\n", maxcpus); | 194 | " Processor ignored.\n", maxcpus); |
193 | return; | 195 | return; |
194 | } | 196 | } |
195 | num_processors++; | 197 | num_processors++; |
196 | ver = m->mpc_apicver; | 198 | ver = m->mpc_apicver; |
197 | 199 | ||
198 | if (!MP_valid_apicid(apicid, ver)) { | 200 | if (!MP_valid_apicid(apicid, ver)) { |
199 | printk(KERN_WARNING "Processor #%d INVALID. (Max ID: %d).\n", | 201 | printk(KERN_WARNING "Processor #%d INVALID. (Max ID: %d).\n", |
200 | m->mpc_apicid, MAX_APICS); | 202 | m->mpc_apicid, MAX_APICS); |
201 | --num_processors; | 203 | --num_processors; |
202 | return; | 204 | return; |
203 | } | 205 | } |
204 | 206 | ||
205 | tmp = apicid_to_cpu_present(apicid); | 207 | tmp = apicid_to_cpu_present(apicid); |
206 | physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp); | 208 | physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp); |
207 | 209 | ||
208 | /* | 210 | /* |
209 | * Validate version | 211 | * Validate version |
210 | */ | 212 | */ |
211 | if (ver == 0x0) { | 213 | if (ver == 0x0) { |
212 | printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid); | 214 | printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid); |
213 | ver = 0x10; | 215 | ver = 0x10; |
214 | } | 216 | } |
215 | apic_version[m->mpc_apicid] = ver; | 217 | apic_version[m->mpc_apicid] = ver; |
218 | if ((num_processors > 8) && | ||
219 | APIC_XAPIC(ver) && | ||
220 | (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) | ||
221 | def_to_bigsmp = 1; | ||
222 | else | ||
223 | def_to_bigsmp = 0; | ||
224 | |||
216 | bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; | 225 | bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; |
217 | } | 226 | } |
218 | 227 | ||
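
The lines added at the end of MP_processor_info() above are the heart of this patch: after each enabled processor entry is counted, the generic subarchitecture's default is re-evaluated, and def_to_bigsmp is set only when more than 8 logical CPUs have been seen, the entry's APIC version satisfies APIC_XAPIC(), and the boot CPU vendor is Intel. A sketch of the same condition as a stand-alone predicate (the helper name is hypothetical; APIC_XAPIC(), X86_VENDOR_INTEL and boot_cpu_data are the symbols the patch itself uses):

	static int __init want_bigsmp(unsigned int nr_cpus, int apic_ver)
	{
		/* mirrors the check added above: >8 logical CPUs, xAPIC, Intel */
		return nr_cpus > 8 &&
		       APIC_XAPIC(apic_ver) &&
		       boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
	}

	/* equivalent to the added if/else: */
	def_to_bigsmp = want_bigsmp(num_processors, ver);

Because the else branch clears the flag again, def_to_bigsmp ends up reflecting the last enabled processor entry parsed.
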
219 | static void __init MP_bus_info (struct mpc_config_bus *m) | 228 | static void __init MP_bus_info (struct mpc_config_bus *m) |
220 | { | 229 | { |
221 | char str[7]; | 230 | char str[7]; |
222 | 231 | ||
223 | memcpy(str, m->mpc_bustype, 6); | 232 | memcpy(str, m->mpc_bustype, 6); |
224 | str[6] = 0; | 233 | str[6] = 0; |
225 | 234 | ||
226 | mpc_oem_bus_info(m, str, translation_table[mpc_record]); | 235 | mpc_oem_bus_info(m, str, translation_table[mpc_record]); |
227 | 236 | ||
228 | if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { | 237 | if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { |
229 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; | 238 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; |
230 | } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { | 239 | } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { |
231 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; | 240 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; |
232 | } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { | 241 | } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { |
233 | mpc_oem_pci_bus(m, translation_table[mpc_record]); | 242 | mpc_oem_pci_bus(m, translation_table[mpc_record]); |
234 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; | 243 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; |
235 | mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; | 244 | mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; |
236 | mp_current_pci_id++; | 245 | mp_current_pci_id++; |
237 | } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) { | 246 | } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) { |
238 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; | 247 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; |
239 | } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) { | 248 | } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) { |
240 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98; | 249 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98; |
241 | } else { | 250 | } else { |
242 | printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); | 251 | printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); |
243 | } | 252 | } |
244 | } | 253 | } |
245 | 254 | ||
246 | static void __init MP_ioapic_info (struct mpc_config_ioapic *m) | 255 | static void __init MP_ioapic_info (struct mpc_config_ioapic *m) |
247 | { | 256 | { |
248 | if (!(m->mpc_flags & MPC_APIC_USABLE)) | 257 | if (!(m->mpc_flags & MPC_APIC_USABLE)) |
249 | return; | 258 | return; |
250 | 259 | ||
251 | printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n", | 260 | printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n", |
252 | m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); | 261 | m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); |
253 | if (nr_ioapics >= MAX_IO_APICS) { | 262 | if (nr_ioapics >= MAX_IO_APICS) { |
254 | printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n", | 263 | printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n", |
255 | MAX_IO_APICS, nr_ioapics); | 264 | MAX_IO_APICS, nr_ioapics); |
256 | panic("Recompile kernel with bigger MAX_IO_APICS!.\n"); | 265 | panic("Recompile kernel with bigger MAX_IO_APICS!.\n"); |
257 | } | 266 | } |
258 | if (!m->mpc_apicaddr) { | 267 | if (!m->mpc_apicaddr) { |
259 | printk(KERN_ERR "WARNING: bogus zero I/O APIC address" | 268 | printk(KERN_ERR "WARNING: bogus zero I/O APIC address" |
260 | " found in MP table, skipping!\n"); | 269 | " found in MP table, skipping!\n"); |
261 | return; | 270 | return; |
262 | } | 271 | } |
263 | mp_ioapics[nr_ioapics] = *m; | 272 | mp_ioapics[nr_ioapics] = *m; |
264 | nr_ioapics++; | 273 | nr_ioapics++; |
265 | } | 274 | } |
266 | 275 | ||
267 | static void __init MP_intsrc_info (struct mpc_config_intsrc *m) | 276 | static void __init MP_intsrc_info (struct mpc_config_intsrc *m) |
268 | { | 277 | { |
269 | mp_irqs [mp_irq_entries] = *m; | 278 | mp_irqs [mp_irq_entries] = *m; |
270 | Dprintk("Int: type %d, pol %d, trig %d, bus %d," | 279 | Dprintk("Int: type %d, pol %d, trig %d, bus %d," |
271 | " IRQ %02x, APIC ID %x, APIC INT %02x\n", | 280 | " IRQ %02x, APIC ID %x, APIC INT %02x\n", |
272 | m->mpc_irqtype, m->mpc_irqflag & 3, | 281 | m->mpc_irqtype, m->mpc_irqflag & 3, |
273 | (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, | 282 | (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, |
274 | m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); | 283 | m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); |
275 | if (++mp_irq_entries == MAX_IRQ_SOURCES) | 284 | if (++mp_irq_entries == MAX_IRQ_SOURCES) |
276 | panic("Max # of irq sources exceeded!!\n"); | 285 | panic("Max # of irq sources exceeded!!\n"); |
277 | } | 286 | } |
278 | 287 | ||
279 | static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) | 288 | static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m) |
280 | { | 289 | { |
281 | Dprintk("Lint: type %d, pol %d, trig %d, bus %d," | 290 | Dprintk("Lint: type %d, pol %d, trig %d, bus %d," |
282 | " IRQ %02x, APIC ID %x, APIC LINT %02x\n", | 291 | " IRQ %02x, APIC ID %x, APIC LINT %02x\n", |
283 | m->mpc_irqtype, m->mpc_irqflag & 3, | 292 | m->mpc_irqtype, m->mpc_irqflag & 3, |
284 | (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, | 293 | (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid, |
285 | m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); | 294 | m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); |
286 | /* | 295 | /* |
287 | * Well it seems all SMP boards in existence | 296 | * Well it seems all SMP boards in existence |
288 | * use ExtINT/LVT1 == LINT0 and | 297 | * use ExtINT/LVT1 == LINT0 and |
289 | * NMI/LVT2 == LINT1 - the following check | 298 | * NMI/LVT2 == LINT1 - the following check |
290 | * will show us if this assumption is false. | 299 | * will show us if this assumption is false. |
291 | * Until then we do not have to add baggage. | 300 | * Until then we do not have to add baggage. |
292 | */ | 301 | */ |
293 | if ((m->mpc_irqtype == mp_ExtINT) && | 302 | if ((m->mpc_irqtype == mp_ExtINT) && |
294 | (m->mpc_destapiclint != 0)) | 303 | (m->mpc_destapiclint != 0)) |
295 | BUG(); | 304 | BUG(); |
296 | if ((m->mpc_irqtype == mp_NMI) && | 305 | if ((m->mpc_irqtype == mp_NMI) && |
297 | (m->mpc_destapiclint != 1)) | 306 | (m->mpc_destapiclint != 1)) |
298 | BUG(); | 307 | BUG(); |
299 | } | 308 | } |
300 | 309 | ||
301 | #ifdef CONFIG_X86_NUMAQ | 310 | #ifdef CONFIG_X86_NUMAQ |
302 | static void __init MP_translation_info (struct mpc_config_translation *m) | 311 | static void __init MP_translation_info (struct mpc_config_translation *m) |
303 | { | 312 | { |
304 | printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local); | 313 | printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local); |
305 | 314 | ||
306 | if (mpc_record >= MAX_MPC_ENTRY) | 315 | if (mpc_record >= MAX_MPC_ENTRY) |
307 | printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); | 316 | printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n"); |
308 | else | 317 | else |
309 | translation_table[mpc_record] = m; /* stash this for later */ | 318 | translation_table[mpc_record] = m; /* stash this for later */ |
310 | if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) | 319 | if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad)) |
311 | node_set_online(m->trans_quad); | 320 | node_set_online(m->trans_quad); |
312 | } | 321 | } |
313 | 322 | ||
314 | /* | 323 | /* |
315 | * Read/parse the MPC oem tables | 324 | * Read/parse the MPC oem tables |
316 | */ | 325 | */ |
317 | 326 | ||
318 | static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \ | 327 | static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \ |
319 | unsigned short oemsize) | 328 | unsigned short oemsize) |
320 | { | 329 | { |
321 | int count = sizeof (*oemtable); /* the header size */ | 330 | int count = sizeof (*oemtable); /* the header size */ |
322 | unsigned char *oemptr = ((unsigned char *)oemtable)+count; | 331 | unsigned char *oemptr = ((unsigned char *)oemtable)+count; |
323 | 332 | ||
324 | mpc_record = 0; | 333 | mpc_record = 0; |
325 | printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable); | 334 | printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable); |
326 | if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4)) | 335 | if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4)) |
327 | { | 336 | { |
328 | printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", | 337 | printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", |
329 | oemtable->oem_signature[0], | 338 | oemtable->oem_signature[0], |
330 | oemtable->oem_signature[1], | 339 | oemtable->oem_signature[1], |
331 | oemtable->oem_signature[2], | 340 | oemtable->oem_signature[2], |
332 | oemtable->oem_signature[3]); | 341 | oemtable->oem_signature[3]); |
333 | return; | 342 | return; |
334 | } | 343 | } |
335 | if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length)) | 344 | if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length)) |
336 | { | 345 | { |
337 | printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); | 346 | printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); |
338 | return; | 347 | return; |
339 | } | 348 | } |
340 | while (count < oemtable->oem_length) { | 349 | while (count < oemtable->oem_length) { |
341 | switch (*oemptr) { | 350 | switch (*oemptr) { |
342 | case MP_TRANSLATION: | 351 | case MP_TRANSLATION: |
343 | { | 352 | { |
344 | struct mpc_config_translation *m= | 353 | struct mpc_config_translation *m= |
345 | (struct mpc_config_translation *)oemptr; | 354 | (struct mpc_config_translation *)oemptr; |
346 | MP_translation_info(m); | 355 | MP_translation_info(m); |
347 | oemptr += sizeof(*m); | 356 | oemptr += sizeof(*m); |
348 | count += sizeof(*m); | 357 | count += sizeof(*m); |
349 | ++mpc_record; | 358 | ++mpc_record; |
350 | break; | 359 | break; |
351 | } | 360 | } |
352 | default: | 361 | default: |
353 | { | 362 | { |
354 | printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr); | 363 | printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr); |
355 | return; | 364 | return; |
356 | } | 365 | } |
357 | } | 366 | } |
358 | } | 367 | } |
359 | } | 368 | } |
360 | 369 | ||
361 | static inline void mps_oem_check(struct mp_config_table *mpc, char *oem, | 370 | static inline void mps_oem_check(struct mp_config_table *mpc, char *oem, |
362 | char *productid) | 371 | char *productid) |
363 | { | 372 | { |
364 | if (strncmp(oem, "IBM NUMA", 8)) | 373 | if (strncmp(oem, "IBM NUMA", 8)) |
365 | printk("Warning! May not be a NUMA-Q system!\n"); | 374 | printk("Warning! May not be a NUMA-Q system!\n"); |
366 | if (mpc->mpc_oemptr) | 375 | if (mpc->mpc_oemptr) |
367 | smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr, | 376 | smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr, |
368 | mpc->mpc_oemsize); | 377 | mpc->mpc_oemsize); |
369 | } | 378 | } |
370 | #endif /* CONFIG_X86_NUMAQ */ | 379 | #endif /* CONFIG_X86_NUMAQ */ |
371 | 380 | ||
372 | /* | 381 | /* |
373 | * Read/parse the MPC | 382 | * Read/parse the MPC |
374 | */ | 383 | */ |
375 | 384 | ||
376 | static int __init smp_read_mpc(struct mp_config_table *mpc) | 385 | static int __init smp_read_mpc(struct mp_config_table *mpc) |
377 | { | 386 | { |
378 | char str[16]; | 387 | char str[16]; |
379 | char oem[10]; | 388 | char oem[10]; |
380 | int count=sizeof(*mpc); | 389 | int count=sizeof(*mpc); |
381 | unsigned char *mpt=((unsigned char *)mpc)+count; | 390 | unsigned char *mpt=((unsigned char *)mpc)+count; |
382 | 391 | ||
383 | if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) { | 392 | if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) { |
384 | printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n", | 393 | printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n", |
385 | *(u32 *)mpc->mpc_signature); | 394 | *(u32 *)mpc->mpc_signature); |
386 | return 0; | 395 | return 0; |
387 | } | 396 | } |
388 | if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) { | 397 | if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) { |
389 | printk(KERN_ERR "SMP mptable: checksum error!\n"); | 398 | printk(KERN_ERR "SMP mptable: checksum error!\n"); |
390 | return 0; | 399 | return 0; |
391 | } | 400 | } |
392 | if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) { | 401 | if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) { |
393 | printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n", | 402 | printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n", |
394 | mpc->mpc_spec); | 403 | mpc->mpc_spec); |
395 | return 0; | 404 | return 0; |
396 | } | 405 | } |
397 | if (!mpc->mpc_lapic) { | 406 | if (!mpc->mpc_lapic) { |
398 | printk(KERN_ERR "SMP mptable: null local APIC address!\n"); | 407 | printk(KERN_ERR "SMP mptable: null local APIC address!\n"); |
399 | return 0; | 408 | return 0; |
400 | } | 409 | } |
401 | memcpy(oem,mpc->mpc_oem,8); | 410 | memcpy(oem,mpc->mpc_oem,8); |
402 | oem[8]=0; | 411 | oem[8]=0; |
403 | printk(KERN_INFO "OEM ID: %s ",oem); | 412 | printk(KERN_INFO "OEM ID: %s ",oem); |
404 | 413 | ||
405 | memcpy(str,mpc->mpc_productid,12); | 414 | memcpy(str,mpc->mpc_productid,12); |
406 | str[12]=0; | 415 | str[12]=0; |
407 | printk("Product ID: %s ",str); | 416 | printk("Product ID: %s ",str); |
408 | 417 | ||
409 | mps_oem_check(mpc, oem, str); | 418 | mps_oem_check(mpc, oem, str); |
410 | 419 | ||
411 | printk("APIC at: 0x%lX\n",mpc->mpc_lapic); | 420 | printk("APIC at: 0x%lX\n",mpc->mpc_lapic); |
412 | 421 | ||
413 | /* | 422 | /* |
414 | * Save the local APIC address (it might be non-default) -- but only | 423 | * Save the local APIC address (it might be non-default) -- but only |
415 | * if we're not using ACPI. | 424 | * if we're not using ACPI. |
416 | */ | 425 | */ |
417 | if (!acpi_lapic) | 426 | if (!acpi_lapic) |
418 | mp_lapic_addr = mpc->mpc_lapic; | 427 | mp_lapic_addr = mpc->mpc_lapic; |
419 | 428 | ||
420 | /* | 429 | /* |
421 | * Now process the configuration blocks. | 430 | * Now process the configuration blocks. |
422 | */ | 431 | */ |
423 | mpc_record = 0; | 432 | mpc_record = 0; |
424 | while (count < mpc->mpc_length) { | 433 | while (count < mpc->mpc_length) { |
425 | switch(*mpt) { | 434 | switch(*mpt) { |
426 | case MP_PROCESSOR: | 435 | case MP_PROCESSOR: |
427 | { | 436 | { |
428 | struct mpc_config_processor *m= | 437 | struct mpc_config_processor *m= |
429 | (struct mpc_config_processor *)mpt; | 438 | (struct mpc_config_processor *)mpt; |
430 | /* ACPI may have already provided this data */ | 439 | /* ACPI may have already provided this data */ |
431 | if (!acpi_lapic) | 440 | if (!acpi_lapic) |
432 | MP_processor_info(m); | 441 | MP_processor_info(m); |
433 | mpt += sizeof(*m); | 442 | mpt += sizeof(*m); |
434 | count += sizeof(*m); | 443 | count += sizeof(*m); |
435 | break; | 444 | break; |
436 | } | 445 | } |
437 | case MP_BUS: | 446 | case MP_BUS: |
438 | { | 447 | { |
439 | struct mpc_config_bus *m= | 448 | struct mpc_config_bus *m= |
440 | (struct mpc_config_bus *)mpt; | 449 | (struct mpc_config_bus *)mpt; |
441 | MP_bus_info(m); | 450 | MP_bus_info(m); |
442 | mpt += sizeof(*m); | 451 | mpt += sizeof(*m); |
443 | count += sizeof(*m); | 452 | count += sizeof(*m); |
444 | break; | 453 | break; |
445 | } | 454 | } |
446 | case MP_IOAPIC: | 455 | case MP_IOAPIC: |
447 | { | 456 | { |
448 | struct mpc_config_ioapic *m= | 457 | struct mpc_config_ioapic *m= |
449 | (struct mpc_config_ioapic *)mpt; | 458 | (struct mpc_config_ioapic *)mpt; |
450 | MP_ioapic_info(m); | 459 | MP_ioapic_info(m); |
451 | mpt+=sizeof(*m); | 460 | mpt+=sizeof(*m); |
452 | count+=sizeof(*m); | 461 | count+=sizeof(*m); |
453 | break; | 462 | break; |
454 | } | 463 | } |
455 | case MP_INTSRC: | 464 | case MP_INTSRC: |
456 | { | 465 | { |
457 | struct mpc_config_intsrc *m= | 466 | struct mpc_config_intsrc *m= |
458 | (struct mpc_config_intsrc *)mpt; | 467 | (struct mpc_config_intsrc *)mpt; |
459 | 468 | ||
460 | MP_intsrc_info(m); | 469 | MP_intsrc_info(m); |
461 | mpt+=sizeof(*m); | 470 | mpt+=sizeof(*m); |
462 | count+=sizeof(*m); | 471 | count+=sizeof(*m); |
463 | break; | 472 | break; |
464 | } | 473 | } |
465 | case MP_LINTSRC: | 474 | case MP_LINTSRC: |
466 | { | 475 | { |
467 | struct mpc_config_lintsrc *m= | 476 | struct mpc_config_lintsrc *m= |
468 | (struct mpc_config_lintsrc *)mpt; | 477 | (struct mpc_config_lintsrc *)mpt; |
469 | MP_lintsrc_info(m); | 478 | MP_lintsrc_info(m); |
470 | mpt+=sizeof(*m); | 479 | mpt+=sizeof(*m); |
471 | count+=sizeof(*m); | 480 | count+=sizeof(*m); |
472 | break; | 481 | break; |
473 | } | 482 | } |
474 | default: | 483 | default: |
475 | { | 484 | { |
476 | count = mpc->mpc_length; | 485 | count = mpc->mpc_length; |
477 | break; | 486 | break; |
478 | } | 487 | } |
479 | } | 488 | } |
480 | ++mpc_record; | 489 | ++mpc_record; |
481 | } | 490 | } |
482 | clustered_apic_check(); | 491 | clustered_apic_check(); |
483 | if (!num_processors) | 492 | if (!num_processors) |
484 | printk(KERN_ERR "SMP mptable: no processors registered!\n"); | 493 | printk(KERN_ERR "SMP mptable: no processors registered!\n"); |
485 | return num_processors; | 494 | return num_processors; |
486 | } | 495 | } |
487 | 496 | ||
488 | static int __init ELCR_trigger(unsigned int irq) | 497 | static int __init ELCR_trigger(unsigned int irq) |
489 | { | 498 | { |
490 | unsigned int port; | 499 | unsigned int port; |
491 | 500 | ||
492 | port = 0x4d0 + (irq >> 3); | 501 | port = 0x4d0 + (irq >> 3); |
493 | return (inb(port) >> (irq & 7)) & 1; | 502 | return (inb(port) >> (irq & 7)) & 1; |
494 | } | 503 | } |
495 | 504 | ||
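
For reference, ELCR_trigger() above reads the edge/level control registers referred to in the comments below: port 0x4d0 carries one trigger bit per IRQ for IRQs 0-7 and port 0x4d1 for IRQs 8-15, with a set bit meaning level-triggered. Worked through for the IRQ 13 probe used in the sanity check below, port = 0x4d0 + (13 >> 3) = 0x4d1 and the bit tested is 13 & 7 = 5, so ELCR_trigger(13) returns bit 5 of port 0x4d1.
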
496 | static void __init construct_default_ioirq_mptable(int mpc_default_type) | 505 | static void __init construct_default_ioirq_mptable(int mpc_default_type) |
497 | { | 506 | { |
498 | struct mpc_config_intsrc intsrc; | 507 | struct mpc_config_intsrc intsrc; |
499 | int i; | 508 | int i; |
500 | int ELCR_fallback = 0; | 509 | int ELCR_fallback = 0; |
501 | 510 | ||
502 | intsrc.mpc_type = MP_INTSRC; | 511 | intsrc.mpc_type = MP_INTSRC; |
503 | intsrc.mpc_irqflag = 0; /* conforming */ | 512 | intsrc.mpc_irqflag = 0; /* conforming */ |
504 | intsrc.mpc_srcbus = 0; | 513 | intsrc.mpc_srcbus = 0; |
505 | intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; | 514 | intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid; |
506 | 515 | ||
507 | intsrc.mpc_irqtype = mp_INT; | 516 | intsrc.mpc_irqtype = mp_INT; |
508 | 517 | ||
509 | /* | 518 | /* |
510 | * If true, we have an ISA/PCI system with no IRQ entries | 519 | * If true, we have an ISA/PCI system with no IRQ entries |
511 | * in the MP table. To prevent the PCI interrupts from being set up | 520 | * in the MP table. To prevent the PCI interrupts from being set up |
512 | * incorrectly, we try to use the ELCR. The sanity check to see if | 521 | * incorrectly, we try to use the ELCR. The sanity check to see if |
513 | * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can | 522 | * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can |
514 | * never be level sensitive, so we simply see if the ELCR agrees. | 523 | * never be level sensitive, so we simply see if the ELCR agrees. |
515 | * If it does, we assume it's valid. | 524 | * If it does, we assume it's valid. |
516 | */ | 525 | */ |
517 | if (mpc_default_type == 5) { | 526 | if (mpc_default_type == 5) { |
518 | printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); | 527 | printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n"); |
519 | 528 | ||
520 | if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13)) | 529 | if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13)) |
521 | printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n"); | 530 | printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n"); |
522 | else { | 531 | else { |
523 | printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); | 532 | printk(KERN_INFO "Using ELCR to identify PCI interrupts\n"); |
524 | ELCR_fallback = 1; | 533 | ELCR_fallback = 1; |
525 | } | 534 | } |
526 | } | 535 | } |
527 | 536 | ||
528 | for (i = 0; i < 16; i++) { | 537 | for (i = 0; i < 16; i++) { |
529 | switch (mpc_default_type) { | 538 | switch (mpc_default_type) { |
530 | case 2: | 539 | case 2: |
531 | if (i == 0 || i == 13) | 540 | if (i == 0 || i == 13) |
532 | continue; /* IRQ0 & IRQ13 not connected */ | 541 | continue; /* IRQ0 & IRQ13 not connected */ |
533 | /* fall through */ | 542 | /* fall through */ |
534 | default: | 543 | default: |
535 | if (i == 2) | 544 | if (i == 2) |
536 | continue; /* IRQ2 is never connected */ | 545 | continue; /* IRQ2 is never connected */ |
537 | } | 546 | } |
538 | 547 | ||
539 | if (ELCR_fallback) { | 548 | if (ELCR_fallback) { |
540 | /* | 549 | /* |
541 | * If the ELCR indicates a level-sensitive interrupt, we | 550 | * If the ELCR indicates a level-sensitive interrupt, we |
542 | * copy that information over to the MP table in the | 551 | * copy that information over to the MP table in the |
543 | * irqflag field (level sensitive, active high polarity). | 552 | * irqflag field (level sensitive, active high polarity). |
544 | */ | 553 | */ |
545 | if (ELCR_trigger(i)) | 554 | if (ELCR_trigger(i)) |
546 | intsrc.mpc_irqflag = 13; | 555 | intsrc.mpc_irqflag = 13; |
547 | else | 556 | else |
548 | intsrc.mpc_irqflag = 0; | 557 | intsrc.mpc_irqflag = 0; |
549 | } | 558 | } |
550 | 559 | ||
551 | intsrc.mpc_srcbusirq = i; | 560 | intsrc.mpc_srcbusirq = i; |
552 | intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ | 561 | intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ |
553 | MP_intsrc_info(&intsrc); | 562 | MP_intsrc_info(&intsrc); |
554 | } | 563 | } |
555 | 564 | ||
556 | intsrc.mpc_irqtype = mp_ExtINT; | 565 | intsrc.mpc_irqtype = mp_ExtINT; |
557 | intsrc.mpc_srcbusirq = 0; | 566 | intsrc.mpc_srcbusirq = 0; |
558 | intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ | 567 | intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ |
559 | MP_intsrc_info(&intsrc); | 568 | MP_intsrc_info(&intsrc); |
560 | } | 569 | } |
561 | 570 | ||
562 | static inline void __init construct_default_ISA_mptable(int mpc_default_type) | 571 | static inline void __init construct_default_ISA_mptable(int mpc_default_type) |
563 | { | 572 | { |
564 | struct mpc_config_processor processor; | 573 | struct mpc_config_processor processor; |
565 | struct mpc_config_bus bus; | 574 | struct mpc_config_bus bus; |
566 | struct mpc_config_ioapic ioapic; | 575 | struct mpc_config_ioapic ioapic; |
567 | struct mpc_config_lintsrc lintsrc; | 576 | struct mpc_config_lintsrc lintsrc; |
568 | int linttypes[2] = { mp_ExtINT, mp_NMI }; | 577 | int linttypes[2] = { mp_ExtINT, mp_NMI }; |
569 | int i; | 578 | int i; |
570 | 579 | ||
571 | /* | 580 | /* |
572 | * local APIC has default address | 581 | * local APIC has default address |
573 | */ | 582 | */ |
574 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; | 583 | mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; |
575 | 584 | ||
576 | /* | 585 | /* |
577 | * 2 CPUs, numbered 0 & 1. | 586 | * 2 CPUs, numbered 0 & 1. |
578 | */ | 587 | */ |
579 | processor.mpc_type = MP_PROCESSOR; | 588 | processor.mpc_type = MP_PROCESSOR; |
580 | /* Either an integrated APIC or a discrete 82489DX. */ | 589 | /* Either an integrated APIC or a discrete 82489DX. */ |
581 | processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; | 590 | processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; |
582 | processor.mpc_cpuflag = CPU_ENABLED; | 591 | processor.mpc_cpuflag = CPU_ENABLED; |
583 | processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | | 592 | processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | |
584 | (boot_cpu_data.x86_model << 4) | | 593 | (boot_cpu_data.x86_model << 4) | |
585 | boot_cpu_data.x86_mask; | 594 | boot_cpu_data.x86_mask; |
586 | processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; | 595 | processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; |
587 | processor.mpc_reserved[0] = 0; | 596 | processor.mpc_reserved[0] = 0; |
588 | processor.mpc_reserved[1] = 0; | 597 | processor.mpc_reserved[1] = 0; |
589 | for (i = 0; i < 2; i++) { | 598 | for (i = 0; i < 2; i++) { |
590 | processor.mpc_apicid = i; | 599 | processor.mpc_apicid = i; |
591 | MP_processor_info(&processor); | 600 | MP_processor_info(&processor); |
592 | } | 601 | } |
593 | 602 | ||
594 | bus.mpc_type = MP_BUS; | 603 | bus.mpc_type = MP_BUS; |
595 | bus.mpc_busid = 0; | 604 | bus.mpc_busid = 0; |
596 | switch (mpc_default_type) { | 605 | switch (mpc_default_type) { |
597 | default: | 606 | default: |
598 | printk("???\n"); | 607 | printk("???\n"); |
599 | printk(KERN_ERR "Unknown standard configuration %d\n", | 608 | printk(KERN_ERR "Unknown standard configuration %d\n", |
600 | mpc_default_type); | 609 | mpc_default_type); |
601 | /* fall through */ | 610 | /* fall through */ |
602 | case 1: | 611 | case 1: |
603 | case 5: | 612 | case 5: |
604 | memcpy(bus.mpc_bustype, "ISA ", 6); | 613 | memcpy(bus.mpc_bustype, "ISA ", 6); |
605 | break; | 614 | break; |
606 | case 2: | 615 | case 2: |
607 | case 6: | 616 | case 6: |
608 | case 3: | 617 | case 3: |
609 | memcpy(bus.mpc_bustype, "EISA ", 6); | 618 | memcpy(bus.mpc_bustype, "EISA ", 6); |
610 | break; | 619 | break; |
611 | case 4: | 620 | case 4: |
612 | case 7: | 621 | case 7: |
613 | memcpy(bus.mpc_bustype, "MCA ", 6); | 622 | memcpy(bus.mpc_bustype, "MCA ", 6); |
614 | } | 623 | } |
615 | MP_bus_info(&bus); | 624 | MP_bus_info(&bus); |
616 | if (mpc_default_type > 4) { | 625 | if (mpc_default_type > 4) { |
617 | bus.mpc_busid = 1; | 626 | bus.mpc_busid = 1; |
618 | memcpy(bus.mpc_bustype, "PCI ", 6); | 627 | memcpy(bus.mpc_bustype, "PCI ", 6); |
619 | MP_bus_info(&bus); | 628 | MP_bus_info(&bus); |
620 | } | 629 | } |
621 | 630 | ||
622 | ioapic.mpc_type = MP_IOAPIC; | 631 | ioapic.mpc_type = MP_IOAPIC; |
623 | ioapic.mpc_apicid = 2; | 632 | ioapic.mpc_apicid = 2; |
624 | ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; | 633 | ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; |
625 | ioapic.mpc_flags = MPC_APIC_USABLE; | 634 | ioapic.mpc_flags = MPC_APIC_USABLE; |
626 | ioapic.mpc_apicaddr = 0xFEC00000; | 635 | ioapic.mpc_apicaddr = 0xFEC00000; |
627 | MP_ioapic_info(&ioapic); | 636 | MP_ioapic_info(&ioapic); |
628 | 637 | ||
629 | /* | 638 | /* |
630 | * We set up most of the low 16 IO-APIC pins according to MPS rules. | 639 | * We set up most of the low 16 IO-APIC pins according to MPS rules. |
631 | */ | 640 | */ |
632 | construct_default_ioirq_mptable(mpc_default_type); | 641 | construct_default_ioirq_mptable(mpc_default_type); |
633 | 642 | ||
634 | lintsrc.mpc_type = MP_LINTSRC; | 643 | lintsrc.mpc_type = MP_LINTSRC; |
635 | lintsrc.mpc_irqflag = 0; /* conforming */ | 644 | lintsrc.mpc_irqflag = 0; /* conforming */ |
636 | lintsrc.mpc_srcbusid = 0; | 645 | lintsrc.mpc_srcbusid = 0; |
637 | lintsrc.mpc_srcbusirq = 0; | 646 | lintsrc.mpc_srcbusirq = 0; |
638 | lintsrc.mpc_destapic = MP_APIC_ALL; | 647 | lintsrc.mpc_destapic = MP_APIC_ALL; |
639 | for (i = 0; i < 2; i++) { | 648 | for (i = 0; i < 2; i++) { |
640 | lintsrc.mpc_irqtype = linttypes[i]; | 649 | lintsrc.mpc_irqtype = linttypes[i]; |
641 | lintsrc.mpc_destapiclint = i; | 650 | lintsrc.mpc_destapiclint = i; |
642 | MP_lintsrc_info(&lintsrc); | 651 | MP_lintsrc_info(&lintsrc); |
643 | } | 652 | } |
644 | } | 653 | } |
645 | 654 | ||
646 | static struct intel_mp_floating *mpf_found; | 655 | static struct intel_mp_floating *mpf_found; |
647 | 656 | ||
648 | /* | 657 | /* |
649 | * Scan the memory blocks for an SMP configuration block. | 658 | * Scan the memory blocks for an SMP configuration block. |
650 | */ | 659 | */ |
651 | void __init get_smp_config (void) | 660 | void __init get_smp_config (void) |
652 | { | 661 | { |
653 | struct intel_mp_floating *mpf = mpf_found; | 662 | struct intel_mp_floating *mpf = mpf_found; |
654 | 663 | ||
655 | /* | 664 | /* |
656 | * ACPI may be used to obtain the entire SMP configuration or just to | 665 | * ACPI may be used to obtain the entire SMP configuration or just to |
657 | * enumerate/configure processors (CONFIG_ACPI_BOOT). Note that | 666 | * enumerate/configure processors (CONFIG_ACPI_BOOT). Note that |
658 | * ACPI supports both logical (e.g. Hyper-Threading) and physical | 667 | * ACPI supports both logical (e.g. Hyper-Threading) and physical |
659 | * processors, where MPS only supports physical. | 668 | * processors, where MPS only supports physical. |
660 | */ | 669 | */ |
661 | if (acpi_lapic && acpi_ioapic) { | 670 | if (acpi_lapic && acpi_ioapic) { |
662 | printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); | 671 | printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); |
663 | return; | 672 | return; |
664 | } | 673 | } |
665 | else if (acpi_lapic) | 674 | else if (acpi_lapic) |
666 | printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); | 675 | printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); |
667 | 676 | ||
668 | printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); | 677 | printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); |
669 | if (mpf->mpf_feature2 & (1<<7)) { | 678 | if (mpf->mpf_feature2 & (1<<7)) { |
670 | printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); | 679 | printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); |
671 | pic_mode = 1; | 680 | pic_mode = 1; |
672 | } else { | 681 | } else { |
673 | printk(KERN_INFO " Virtual Wire compatibility mode.\n"); | 682 | printk(KERN_INFO " Virtual Wire compatibility mode.\n"); |
674 | pic_mode = 0; | 683 | pic_mode = 0; |
675 | } | 684 | } |
676 | 685 | ||
677 | /* | 686 | /* |
678 | * Now see if we need to read further. | 687 | * Now see if we need to read further. |
679 | */ | 688 | */ |
680 | if (mpf->mpf_feature1 != 0) { | 689 | if (mpf->mpf_feature1 != 0) { |
681 | 690 | ||
682 | printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); | 691 | printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1); |
683 | construct_default_ISA_mptable(mpf->mpf_feature1); | 692 | construct_default_ISA_mptable(mpf->mpf_feature1); |
684 | 693 | ||
685 | } else if (mpf->mpf_physptr) { | 694 | } else if (mpf->mpf_physptr) { |
686 | 695 | ||
687 | /* | 696 | /* |
688 | * Read the physical hardware table. Anything here will | 697 | * Read the physical hardware table. Anything here will |
689 | * override the defaults. | 698 | * override the defaults. |
690 | */ | 699 | */ |
691 | if (!smp_read_mpc((void *)mpf->mpf_physptr)) { | 700 | if (!smp_read_mpc((void *)mpf->mpf_physptr)) { |
692 | smp_found_config = 0; | 701 | smp_found_config = 0; |
693 | printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); | 702 | printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); |
694 | printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); | 703 | printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); |
695 | return; | 704 | return; |
696 | } | 705 | } |
697 | /* | 706 | /* |
698 | * If there are no explicit MP IRQ entries, then we are | 707 | * If there are no explicit MP IRQ entries, then we are |
699 | * broken. We set up most of the low 16 IO-APIC pins to | 708 | * broken. We set up most of the low 16 IO-APIC pins to |
700 | * ISA defaults and hope it will work. | 709 | * ISA defaults and hope it will work. |
701 | */ | 710 | */ |
702 | if (!mp_irq_entries) { | 711 | if (!mp_irq_entries) { |
703 | struct mpc_config_bus bus; | 712 | struct mpc_config_bus bus; |
704 | 713 | ||
705 | printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); | 714 | printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n"); |
706 | 715 | ||
707 | bus.mpc_type = MP_BUS; | 716 | bus.mpc_type = MP_BUS; |
708 | bus.mpc_busid = 0; | 717 | bus.mpc_busid = 0; |
709 | memcpy(bus.mpc_bustype, "ISA ", 6); | 718 | memcpy(bus.mpc_bustype, "ISA ", 6); |
710 | MP_bus_info(&bus); | 719 | MP_bus_info(&bus); |
711 | 720 | ||
712 | construct_default_ioirq_mptable(0); | 721 | construct_default_ioirq_mptable(0); |
713 | } | 722 | } |
714 | 723 | ||
715 | } else | 724 | } else |
716 | BUG(); | 725 | BUG(); |
717 | 726 | ||
718 | printk(KERN_INFO "Processors: %d\n", num_processors); | 727 | printk(KERN_INFO "Processors: %d\n", num_processors); |
719 | /* | 728 | /* |
720 | * Only use the first configuration found. | 729 | * Only use the first configuration found. |
721 | */ | 730 | */ |
722 | } | 731 | } |
723 | 732 | ||
724 | static int __init smp_scan_config (unsigned long base, unsigned long length) | 733 | static int __init smp_scan_config (unsigned long base, unsigned long length) |
725 | { | 734 | { |
726 | unsigned long *bp = phys_to_virt(base); | 735 | unsigned long *bp = phys_to_virt(base); |
727 | struct intel_mp_floating *mpf; | 736 | struct intel_mp_floating *mpf; |
728 | 737 | ||
729 | Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length); | 738 | Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length); |
730 | if (sizeof(*mpf) != 16) | 739 | if (sizeof(*mpf) != 16) |
731 | printk("Error: MPF size\n"); | 740 | printk("Error: MPF size\n"); |
732 | 741 | ||
733 | while (length > 0) { | 742 | while (length > 0) { |
734 | mpf = (struct intel_mp_floating *)bp; | 743 | mpf = (struct intel_mp_floating *)bp; |
735 | if ((*bp == SMP_MAGIC_IDENT) && | 744 | if ((*bp == SMP_MAGIC_IDENT) && |
736 | (mpf->mpf_length == 1) && | 745 | (mpf->mpf_length == 1) && |
737 | !mpf_checksum((unsigned char *)bp, 16) && | 746 | !mpf_checksum((unsigned char *)bp, 16) && |
738 | ((mpf->mpf_specification == 1) | 747 | ((mpf->mpf_specification == 1) |
739 | || (mpf->mpf_specification == 4)) ) { | 748 | || (mpf->mpf_specification == 4)) ) { |
740 | 749 | ||
741 | smp_found_config = 1; | 750 | smp_found_config = 1; |
742 | printk(KERN_INFO "found SMP MP-table at %08lx\n", | 751 | printk(KERN_INFO "found SMP MP-table at %08lx\n", |
743 | virt_to_phys(mpf)); | 752 | virt_to_phys(mpf)); |
744 | reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE); | 753 | reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE); |
745 | if (mpf->mpf_physptr) { | 754 | if (mpf->mpf_physptr) { |
746 | /* | 755 | /* |
747 | * We cannot access the MPC table to compute its | 756 | * We cannot access the MPC table to compute its |
748 | * size yet, as only the first few megabytes from | 757 | * size yet, as only the first few megabytes from |
749 | * the bottom are mapped at this point. | 758 | * the bottom are mapped at this point. |
750 | * The PC-9800 places its MPC table at the very end | 759 | * The PC-9800 places its MPC table at the very end |
751 | * of physical memory, so simply reserving | 760 | * of physical memory, so simply reserving |
752 | * PAGE_SIZE from mpf->mpf_physptr would hit BUG() | 761 | * PAGE_SIZE from mpf->mpf_physptr would hit BUG() |
753 | * in reserve_bootmem. | 762 | * in reserve_bootmem. |
754 | */ | 763 | */ |
755 | unsigned long size = PAGE_SIZE; | 764 | unsigned long size = PAGE_SIZE; |
756 | unsigned long end = max_low_pfn * PAGE_SIZE; | 765 | unsigned long end = max_low_pfn * PAGE_SIZE; |
757 | if (mpf->mpf_physptr + size > end) | 766 | if (mpf->mpf_physptr + size > end) |
758 | size = end - mpf->mpf_physptr; | 767 | size = end - mpf->mpf_physptr; |
759 | reserve_bootmem(mpf->mpf_physptr, size); | 768 | reserve_bootmem(mpf->mpf_physptr, size); |
760 | } | 769 | } |
761 | 770 | ||
762 | mpf_found = mpf; | 771 | mpf_found = mpf; |
763 | return 1; | 772 | return 1; |
764 | } | 773 | } |
765 | bp += 4; | 774 | bp += 4; |
766 | length -= 16; | 775 | length -= 16; |
767 | } | 776 | } |
768 | return 0; | 777 | return 0; |
769 | } | 778 | } |
770 | 779 | ||
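The loop in smp_scan_config() accepts a candidate MP floating pointer only when four conditions hold: the 32-bit word at the candidate address equals SMP_MAGIC_IDENT (the ASCII bytes "_MP_" on little-endian x86), the length field is 1 (one 16-byte paragraph), all 16 bytes sum to zero, and the spec revision is 1 or 4. A stand-alone sketch of the checksum rule, with a hypothetical helper name (the kernel's own mpf_checksum() is defined elsewhere):

#include <stdint.h>

/* All 16 bytes of the floating pointer structure, including the
 * checksum byte itself, must sum to zero modulo 256. */
static int mpf_sums_to_zero(const uint8_t *mpf, unsigned int len)
{
        uint8_t sum = 0;

        while (len--)
                sum += *mpf++;

        return sum == 0;
}

Note that the scan advances bp by four unsigned longs (16 bytes on i386) per iteration while subtracting 16 from length, matching the spec's requirement that the structure start on a 16-byte boundary.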
771 | void __init find_smp_config (void) | 780 | void __init find_smp_config (void) |
772 | { | 781 | { |
773 | unsigned int address; | 782 | unsigned int address; |
774 | 783 | ||
775 | /* | 784 | /* |
776 | * FIXME: Linux assumes you have 640K of base ram.. | 785 | * FIXME: Linux assumes you have 640K of base ram.. |
777 | * this continues the error... | 786 | * this continues the error... |
778 | * | 787 | * |
779 | * 1) Scan the bottom 1K for a signature | 788 | * 1) Scan the bottom 1K for a signature |
780 | * 2) Scan the top 1K of base RAM | 789 | * 2) Scan the top 1K of base RAM |
781 | * 3) Scan the 64K of bios | 790 | * 3) Scan the 64K of bios |
782 | */ | 791 | */ |
783 | if (smp_scan_config(0x0,0x400) || | 792 | if (smp_scan_config(0x0,0x400) || |
784 | smp_scan_config(639*0x400,0x400) || | 793 | smp_scan_config(639*0x400,0x400) || |
785 | smp_scan_config(0xF0000,0x10000)) | 794 | smp_scan_config(0xF0000,0x10000)) |
786 | return; | 795 | return; |
787 | /* | 796 | /* |
788 | * If it is an SMP machine we should know now, unless the | 797 | * If it is an SMP machine we should know now, unless the |
789 | * configuration is in an EISA/MCA bus machine with an | 798 | * configuration is in an EISA/MCA bus machine with an |
790 | * extended bios data area. | 799 | * extended bios data area. |
791 | * | 800 | * |
792 | * There is a real-mode segmented pointer to the 4K EBDA | 801 | * There is a real-mode segmented pointer to the 4K EBDA |
793 | * area stored at 0x40E; calculate its address and scan it here. | 802 | * area stored at 0x40E; calculate its address and scan it here. |
794 | * | 803 | * |
795 | * NOTE! There are Linux loaders that will corrupt the EBDA | 804 | * NOTE! There are Linux loaders that will corrupt the EBDA |
796 | * area, and as such this kind of SMP config may be less | 805 | * area, and as such this kind of SMP config may be less |
797 | * trustworthy, simply because the SMP table may have been | 806 | * trustworthy, simply because the SMP table may have been |
798 | * stomped on during early boot. These loaders are buggy and | 807 | * stomped on during early boot. These loaders are buggy and |
799 | * should be fixed. | 808 | * should be fixed. |
800 | * | 809 | * |
801 | * The MP 1.4 spec says to scan only the first 1K of the 4K EBDA. | 810 | * The MP 1.4 spec says to scan only the first 1K of the 4K EBDA. |
802 | */ | 811 | */ |
803 | 812 | ||
804 | address = get_bios_ebda(); | 813 | address = get_bios_ebda(); |
805 | if (address) | 814 | if (address) |
806 | smp_scan_config(address, 0x400); | 815 | smp_scan_config(address, 0x400); |
807 | } | 816 | } |
808 | 817 | ||
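get_bios_ebda() is defined outside this hunk, but the comment above describes what it does: the BIOS stores a real-mode segment value for the Extended BIOS Data Area at physical address 0x40E, and the EBDA base is that segment shifted left by four bits. A minimal sketch of the conversion (hypothetical helper, not the kernel's implementation):

/* Real-mode segment -> physical address: segment * 16. */
static unsigned long ebda_base(unsigned short segment_at_0x40e)
{
        return (unsigned long)segment_at_0x40e << 4;
}

For example, a stored segment of 0x9FC0 puts the EBDA at 0x9FC00, just under the 640K boundary, which is why only 0x400 bytes are scanned there.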
809 | /* -------------------------------------------------------------------------- | 818 | /* -------------------------------------------------------------------------- |
810 | ACPI-based MP Configuration | 819 | ACPI-based MP Configuration |
811 | -------------------------------------------------------------------------- */ | 820 | -------------------------------------------------------------------------- */ |
812 | 821 | ||
813 | #ifdef CONFIG_ACPI_BOOT | 822 | #ifdef CONFIG_ACPI_BOOT |
814 | 823 | ||
815 | void __init mp_register_lapic_address ( | 824 | void __init mp_register_lapic_address ( |
816 | u64 address) | 825 | u64 address) |
817 | { | 826 | { |
818 | mp_lapic_addr = (unsigned long) address; | 827 | mp_lapic_addr = (unsigned long) address; |
819 | 828 | ||
820 | set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); | 829 | set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); |
821 | 830 | ||
822 | if (boot_cpu_physical_apicid == -1U) | 831 | if (boot_cpu_physical_apicid == -1U) |
823 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | 832 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); |
824 | 833 | ||
825 | Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); | 834 | Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); |
826 | } | 835 | } |
827 | 836 | ||
828 | 837 | ||
829 | void __init mp_register_lapic ( | 838 | void __init mp_register_lapic ( |
830 | u8 id, | 839 | u8 id, |
831 | u8 enabled) | 840 | u8 enabled) |
832 | { | 841 | { |
833 | struct mpc_config_processor processor; | 842 | struct mpc_config_processor processor; |
834 | int boot_cpu = 0; | 843 | int boot_cpu = 0; |
835 | 844 | ||
836 | if (MAX_APICS - id <= 0) { | 845 | if (MAX_APICS - id <= 0) { |
837 | printk(KERN_WARNING "Processor #%d invalid (max %d)\n", | 846 | printk(KERN_WARNING "Processor #%d invalid (max %d)\n", |
838 | id, MAX_APICS); | 847 | id, MAX_APICS); |
839 | return; | 848 | return; |
840 | } | 849 | } |
841 | 850 | ||
842 | if (id == boot_cpu_physical_apicid) | 851 | if (id == boot_cpu_physical_apicid) |
843 | boot_cpu = 1; | 852 | boot_cpu = 1; |
844 | 853 | ||
845 | processor.mpc_type = MP_PROCESSOR; | 854 | processor.mpc_type = MP_PROCESSOR; |
846 | processor.mpc_apicid = id; | 855 | processor.mpc_apicid = id; |
847 | processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)); | 856 | processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR)); |
848 | processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); | 857 | processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); |
849 | processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0); | 858 | processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0); |
850 | processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | | 859 | processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | |
851 | (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; | 860 | (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; |
852 | processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; | 861 | processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; |
853 | processor.mpc_reserved[0] = 0; | 862 | processor.mpc_reserved[0] = 0; |
854 | processor.mpc_reserved[1] = 0; | 863 | processor.mpc_reserved[1] = 0; |
855 | 864 | ||
856 | MP_processor_info(&processor); | 865 | MP_processor_info(&processor); |
857 | } | 866 | } |
858 | 867 | ||
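mp_register_lapic() above packs the CPU signature into mpc_cpufeature as (family << 8) | (model << 4) | stepping. A small illustration of that packing (the helper name and the example values are made up):

/* Mirrors the mpc_cpufeature packing used above. */
static unsigned int pack_cpu_signature(unsigned int family,
                                       unsigned int model,
                                       unsigned int stepping)
{
        return (family << 8) | (model << 4) | stepping;
}

/* e.g. family 6, model 8, stepping 3  ->  0x683 */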
859 | #if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT)) | 868 | #if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT)) |
860 | 869 | ||
861 | #define MP_ISA_BUS 0 | 870 | #define MP_ISA_BUS 0 |
862 | #define MP_MAX_IOAPIC_PIN 127 | 871 | #define MP_MAX_IOAPIC_PIN 127 |
863 | 872 | ||
864 | static struct mp_ioapic_routing { | 873 | static struct mp_ioapic_routing { |
865 | int apic_id; | 874 | int apic_id; |
866 | int gsi_base; | 875 | int gsi_base; |
867 | int gsi_end; | 876 | int gsi_end; |
868 | u32 pin_programmed[4]; | 877 | u32 pin_programmed[4]; |
869 | } mp_ioapic_routing[MAX_IO_APICS]; | 878 | } mp_ioapic_routing[MAX_IO_APICS]; |
870 | 879 | ||
871 | 880 | ||
872 | static int mp_find_ioapic ( | 881 | static int mp_find_ioapic ( |
873 | int gsi) | 882 | int gsi) |
874 | { | 883 | { |
875 | int i = 0; | 884 | int i = 0; |
876 | 885 | ||
877 | /* Find the IOAPIC that manages this GSI. */ | 886 | /* Find the IOAPIC that manages this GSI. */ |
878 | for (i = 0; i < nr_ioapics; i++) { | 887 | for (i = 0; i < nr_ioapics; i++) { |
879 | if ((gsi >= mp_ioapic_routing[i].gsi_base) | 888 | if ((gsi >= mp_ioapic_routing[i].gsi_base) |
880 | && (gsi <= mp_ioapic_routing[i].gsi_end)) | 889 | && (gsi <= mp_ioapic_routing[i].gsi_end)) |
881 | return i; | 890 | return i; |
882 | } | 891 | } |
883 | 892 | ||
884 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); | 893 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); |
885 | 894 | ||
886 | return -1; | 895 | return -1; |
887 | } | 896 | } |
888 | 897 | ||
889 | 898 | ||
890 | void __init mp_register_ioapic ( | 899 | void __init mp_register_ioapic ( |
891 | u8 id, | 900 | u8 id, |
892 | u32 address, | 901 | u32 address, |
893 | u32 gsi_base) | 902 | u32 gsi_base) |
894 | { | 903 | { |
895 | int idx = 0; | 904 | int idx = 0; |
896 | 905 | ||
897 | if (nr_ioapics >= MAX_IO_APICS) { | 906 | if (nr_ioapics >= MAX_IO_APICS) { |
898 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " | 907 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " |
899 | "(found %d)\n", MAX_IO_APICS, nr_ioapics); | 908 | "(found %d)\n", MAX_IO_APICS, nr_ioapics); |
900 | panic("Recompile kernel with bigger MAX_IO_APICS!\n"); | 909 | panic("Recompile kernel with bigger MAX_IO_APICS!\n"); |
901 | } | 910 | } |
902 | if (!address) { | 911 | if (!address) { |
903 | printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" | 912 | printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" |
904 | " found in MADT table, skipping!\n"); | 913 | " found in MADT table, skipping!\n"); |
905 | return; | 914 | return; |
906 | } | 915 | } |
907 | 916 | ||
908 | idx = nr_ioapics++; | 917 | idx = nr_ioapics++; |
909 | 918 | ||
910 | mp_ioapics[idx].mpc_type = MP_IOAPIC; | 919 | mp_ioapics[idx].mpc_type = MP_IOAPIC; |
911 | mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; | 920 | mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; |
912 | mp_ioapics[idx].mpc_apicaddr = address; | 921 | mp_ioapics[idx].mpc_apicaddr = address; |
913 | 922 | ||
914 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | 923 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); |
915 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15)) | 924 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15)) |
916 | mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id); | 925 | mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id); |
917 | else | 926 | else |
918 | mp_ioapics[idx].mpc_apicid = id; | 927 | mp_ioapics[idx].mpc_apicid = id; |
919 | mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); | 928 | mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); |
920 | 929 | ||
921 | /* | 930 | /* |
922 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups | 931 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups |
923 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | 932 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). |
924 | */ | 933 | */ |
925 | mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; | 934 | mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; |
926 | mp_ioapic_routing[idx].gsi_base = gsi_base; | 935 | mp_ioapic_routing[idx].gsi_base = gsi_base; |
927 | mp_ioapic_routing[idx].gsi_end = gsi_base + | 936 | mp_ioapic_routing[idx].gsi_end = gsi_base + |
928 | io_apic_get_redir_entries(idx); | 937 | io_apic_get_redir_entries(idx); |
929 | 938 | ||
930 | printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, " | 939 | printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, " |
931 | "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, | 940 | "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, |
932 | mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, | 941 | mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, |
933 | mp_ioapic_routing[idx].gsi_base, | 942 | mp_ioapic_routing[idx].gsi_base, |
934 | mp_ioapic_routing[idx].gsi_end); | 943 | mp_ioapic_routing[idx].gsi_end); |
935 | 944 | ||
936 | return; | 945 | return; |
937 | } | 946 | } |
938 | 947 | ||
939 | 948 | ||
940 | void __init mp_override_legacy_irq ( | 949 | void __init mp_override_legacy_irq ( |
941 | u8 bus_irq, | 950 | u8 bus_irq, |
942 | u8 polarity, | 951 | u8 polarity, |
943 | u8 trigger, | 952 | u8 trigger, |
944 | u32 gsi) | 953 | u32 gsi) |
945 | { | 954 | { |
946 | struct mpc_config_intsrc intsrc; | 955 | struct mpc_config_intsrc intsrc; |
947 | int ioapic = -1; | 956 | int ioapic = -1; |
948 | int pin = -1; | 957 | int pin = -1; |
949 | 958 | ||
950 | /* | 959 | /* |
951 | * Convert 'gsi' to 'ioapic.pin'. | 960 | * Convert 'gsi' to 'ioapic.pin'. |
952 | */ | 961 | */ |
953 | ioapic = mp_find_ioapic(gsi); | 962 | ioapic = mp_find_ioapic(gsi); |
954 | if (ioapic < 0) | 963 | if (ioapic < 0) |
955 | return; | 964 | return; |
956 | pin = gsi - mp_ioapic_routing[ioapic].gsi_base; | 965 | pin = gsi - mp_ioapic_routing[ioapic].gsi_base; |
957 | 966 | ||
958 | /* | 967 | /* |
959 | * TBD: This check is for faulty timer entries, where the override | 968 | * TBD: This check is for faulty timer entries, where the override |
960 | * erroneously sets the trigger to level, resulting in a HUGE | 969 | * erroneously sets the trigger to level, resulting in a HUGE |
961 | * increase of timer interrupts! | 970 | * increase of timer interrupts! |
962 | */ | 971 | */ |
963 | if ((bus_irq == 0) && (trigger == 3)) | 972 | if ((bus_irq == 0) && (trigger == 3)) |
964 | trigger = 1; | 973 | trigger = 1; |
965 | 974 | ||
966 | intsrc.mpc_type = MP_INTSRC; | 975 | intsrc.mpc_type = MP_INTSRC; |
967 | intsrc.mpc_irqtype = mp_INT; | 976 | intsrc.mpc_irqtype = mp_INT; |
968 | intsrc.mpc_irqflag = (trigger << 2) | polarity; | 977 | intsrc.mpc_irqflag = (trigger << 2) | polarity; |
969 | intsrc.mpc_srcbus = MP_ISA_BUS; | 978 | intsrc.mpc_srcbus = MP_ISA_BUS; |
970 | intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ | 979 | intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ |
971 | intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ | 980 | intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ |
972 | intsrc.mpc_dstirq = pin; /* INTIN# */ | 981 | intsrc.mpc_dstirq = pin; /* INTIN# */ |
973 | 982 | ||
974 | Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", | 983 | Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", |
975 | intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, | 984 | intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, |
976 | (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, | 985 | (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, |
977 | intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); | 986 | intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); |
978 | 987 | ||
979 | mp_irqs[mp_irq_entries] = intsrc; | 988 | mp_irqs[mp_irq_entries] = intsrc; |
980 | if (++mp_irq_entries == MAX_IRQ_SOURCES) | 989 | if (++mp_irq_entries == MAX_IRQ_SOURCES) |
981 | panic("Max # of irq sources exceeded!\n"); | 990 | panic("Max # of irq sources exceeded!\n"); |
982 | 991 | ||
983 | return; | 992 | return; |
984 | } | 993 | } |
985 | 994 | ||
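The mpc_irqflag word built in mp_override_legacy_irq() packs the polarity into bits 0-1 and the trigger mode into bits 2-3, which is why the debug output above unpacks them with "& 3" and ">> 2 & 3", and why a trigger value of 3 (level, in MPS encoding) is forced back to 1 (edge) for the timer override. A hedged sketch of the packing and unpacking (helper names are made up):

/* bits 0-1 = polarity, bits 2-3 = trigger mode (MPS encoding). */
static unsigned short mps_irqflag(unsigned int trigger, unsigned int polarity)
{
        return (trigger << 2) | polarity;
}

static void mps_irqflag_unpack(unsigned short flag,
                               unsigned int *trigger, unsigned int *polarity)
{
        *polarity = flag & 3;
        *trigger  = (flag >> 2) & 3;
}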
986 | int es7000_plat; | 995 | int es7000_plat; |
987 | 996 | ||
988 | void __init mp_config_acpi_legacy_irqs (void) | 997 | void __init mp_config_acpi_legacy_irqs (void) |
989 | { | 998 | { |
990 | struct mpc_config_intsrc intsrc; | 999 | struct mpc_config_intsrc intsrc; |
991 | int i = 0; | 1000 | int i = 0; |
992 | int ioapic = -1; | 1001 | int ioapic = -1; |
993 | 1002 | ||
994 | /* | 1003 | /* |
995 | * Fabricate the legacy ISA bus (bus #31). | 1004 | * Fabricate the legacy ISA bus (bus #31). |
996 | */ | 1005 | */ |
997 | mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; | 1006 | mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; |
998 | Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); | 1007 | Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); |
999 | 1008 | ||
1000 | /* | 1009 | /* |
1001 | * Older generations of ES7000 have no legacy identity mappings | 1010 | * Older generations of ES7000 have no legacy identity mappings |
1002 | */ | 1011 | */ |
1003 | if (es7000_plat == 1) | 1012 | if (es7000_plat == 1) |
1004 | return; | 1013 | return; |
1005 | 1014 | ||
1006 | /* | 1015 | /* |
1007 | * Locate the IOAPIC that manages the ISA IRQs (0-15). | 1016 | * Locate the IOAPIC that manages the ISA IRQs (0-15). |
1008 | */ | 1017 | */ |
1009 | ioapic = mp_find_ioapic(0); | 1018 | ioapic = mp_find_ioapic(0); |
1010 | if (ioapic < 0) | 1019 | if (ioapic < 0) |
1011 | return; | 1020 | return; |
1012 | 1021 | ||
1013 | intsrc.mpc_type = MP_INTSRC; | 1022 | intsrc.mpc_type = MP_INTSRC; |
1014 | intsrc.mpc_irqflag = 0; /* Conforming */ | 1023 | intsrc.mpc_irqflag = 0; /* Conforming */ |
1015 | intsrc.mpc_srcbus = MP_ISA_BUS; | 1024 | intsrc.mpc_srcbus = MP_ISA_BUS; |
1016 | intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; | 1025 | intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; |
1017 | 1026 | ||
1018 | /* | 1027 | /* |
1019 | * Use the default configuration for IRQs 0-15, unless | 1028 | * Use the default configuration for IRQs 0-15, unless |
1020 | * overridden by (MADT) interrupt source override entries. | 1029 | * overridden by (MADT) interrupt source override entries. |
1021 | */ | 1030 | */ |
1022 | for (i = 0; i < 16; i++) { | 1031 | for (i = 0; i < 16; i++) { |
1023 | int idx; | 1032 | int idx; |
1024 | 1033 | ||
1025 | for (idx = 0; idx < mp_irq_entries; idx++) { | 1034 | for (idx = 0; idx < mp_irq_entries; idx++) { |
1026 | struct mpc_config_intsrc *irq = mp_irqs + idx; | 1035 | struct mpc_config_intsrc *irq = mp_irqs + idx; |
1027 | 1036 | ||
1028 | /* Do we already have a mapping for this ISA IRQ? */ | 1037 | /* Do we already have a mapping for this ISA IRQ? */ |
1029 | if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i) | 1038 | if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i) |
1030 | break; | 1039 | break; |
1031 | 1040 | ||
1032 | /* Do we already have a mapping for this IOAPIC pin? */ | 1041 | /* Do we already have a mapping for this IOAPIC pin? */ |
1033 | if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && | 1042 | if ((irq->mpc_dstapic == intsrc.mpc_dstapic) && |
1034 | (irq->mpc_dstirq == i)) | 1043 | (irq->mpc_dstirq == i)) |
1035 | break; | 1044 | break; |
1036 | } | 1045 | } |
1037 | 1046 | ||
1038 | if (idx != mp_irq_entries) { | 1047 | if (idx != mp_irq_entries) { |
1039 | printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); | 1048 | printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i); |
1040 | continue; /* IRQ already used */ | 1049 | continue; /* IRQ already used */ |
1041 | } | 1050 | } |
1042 | 1051 | ||
1043 | intsrc.mpc_irqtype = mp_INT; | 1052 | intsrc.mpc_irqtype = mp_INT; |
1044 | intsrc.mpc_srcbusirq = i; /* Identity mapped */ | 1053 | intsrc.mpc_srcbusirq = i; /* Identity mapped */ |
1045 | intsrc.mpc_dstirq = i; | 1054 | intsrc.mpc_dstirq = i; |
1046 | 1055 | ||
1047 | Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " | 1056 | Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " |
1048 | "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, | 1057 | "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, |
1049 | (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, | 1058 | (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, |
1050 | intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, | 1059 | intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, |
1051 | intsrc.mpc_dstirq); | 1060 | intsrc.mpc_dstirq); |
1052 | 1061 | ||
1053 | mp_irqs[mp_irq_entries] = intsrc; | 1062 | mp_irqs[mp_irq_entries] = intsrc; |
1054 | if (++mp_irq_entries == MAX_IRQ_SOURCES) | 1063 | if (++mp_irq_entries == MAX_IRQ_SOURCES) |
1055 | panic("Max # of irq sources exceeded!\n"); | 1064 | panic("Max # of irq sources exceeded!\n"); |
1056 | } | 1065 | } |
1057 | } | 1066 | } |
1058 | 1067 | ||
1059 | #define MAX_GSI_NUM 4096 | 1068 | #define MAX_GSI_NUM 4096 |
1060 | 1069 | ||
1061 | int mp_register_gsi (u32 gsi, int edge_level, int active_high_low) | 1070 | int mp_register_gsi (u32 gsi, int edge_level, int active_high_low) |
1062 | { | 1071 | { |
1063 | int ioapic = -1; | 1072 | int ioapic = -1; |
1064 | int ioapic_pin = 0; | 1073 | int ioapic_pin = 0; |
1065 | int idx, bit = 0; | 1074 | int idx, bit = 0; |
1066 | static int pci_irq = 16; | 1075 | static int pci_irq = 16; |
1067 | /* | 1076 | /* |
1068 | * Mapping between Global System Interrupts, which | 1077 | * Mapping between Global System Interrupts, which |
1069 | * represent all possible interrupts, and IRQs | 1078 | * represent all possible interrupts, and IRQs |
1070 | * assigned to actual devices. | 1079 | * assigned to actual devices. |
1071 | */ | 1080 | */ |
1072 | static int gsi_to_irq[MAX_GSI_NUM]; | 1081 | static int gsi_to_irq[MAX_GSI_NUM]; |
1073 | 1082 | ||
1074 | #ifdef CONFIG_ACPI_BUS | 1083 | #ifdef CONFIG_ACPI_BUS |
1075 | /* Don't set up the ACPI SCI because it's already set up */ | 1084 | /* Don't set up the ACPI SCI because it's already set up */ |
1076 | if (acpi_fadt.sci_int == gsi) | 1085 | if (acpi_fadt.sci_int == gsi) |
1077 | return gsi; | 1086 | return gsi; |
1078 | #endif | 1087 | #endif |
1079 | 1088 | ||
1080 | ioapic = mp_find_ioapic(gsi); | 1089 | ioapic = mp_find_ioapic(gsi); |
1081 | if (ioapic < 0) { | 1090 | if (ioapic < 0) { |
1082 | printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); | 1091 | printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi); |
1083 | return gsi; | 1092 | return gsi; |
1084 | } | 1093 | } |
1085 | 1094 | ||
1086 | ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; | 1095 | ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base; |
1087 | 1096 | ||
1088 | if (ioapic_renumber_irq) | 1097 | if (ioapic_renumber_irq) |
1089 | gsi = ioapic_renumber_irq(ioapic, gsi); | 1098 | gsi = ioapic_renumber_irq(ioapic, gsi); |
1090 | 1099 | ||
1091 | /* | 1100 | /* |
1092 | * Avoid pin reprogramming. PRTs typically include entries | 1101 | * Avoid pin reprogramming. PRTs typically include entries |
1093 | * with redundant pin->gsi mappings (but unique PCI devices); | 1102 | * with redundant pin->gsi mappings (but unique PCI devices); |
1094 | * we only program the IOAPIC on the first. | 1103 | * we only program the IOAPIC on the first. |
1095 | */ | 1104 | */ |
1096 | bit = ioapic_pin % 32; | 1105 | bit = ioapic_pin % 32; |
1097 | idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); | 1106 | idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); |
1098 | if (idx > 3) { | 1107 | if (idx > 3) { |
1099 | printk(KERN_ERR "Invalid reference to IOAPIC pin " | 1108 | printk(KERN_ERR "Invalid reference to IOAPIC pin " |
1100 | "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, | 1109 | "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, |
1101 | ioapic_pin); | 1110 | ioapic_pin); |
1102 | return gsi; | 1111 | return gsi; |
1103 | } | 1112 | } |
1104 | if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { | 1113 | if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { |
1105 | Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", | 1114 | Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", |
1106 | mp_ioapic_routing[ioapic].apic_id, ioapic_pin); | 1115 | mp_ioapic_routing[ioapic].apic_id, ioapic_pin); |
1107 | return gsi_to_irq[gsi]; | 1116 | return gsi_to_irq[gsi]; |
1108 | } | 1117 | } |
1109 | 1118 | ||
1110 | mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit); | 1119 | mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit); |
1111 | 1120 | ||
1112 | if (edge_level) { | 1121 | if (edge_level) { |
1113 | /* | 1122 | /* |
1114 | * For PCI devices assign IRQs in order, avoiding gaps | 1123 | * For PCI devices assign IRQs in order, avoiding gaps |
1115 | * due to unused I/O APIC pins. | 1124 | * due to unused I/O APIC pins. |
1116 | */ | 1125 | */ |
1117 | int irq = gsi; | 1126 | int irq = gsi; |
1118 | if (gsi < MAX_GSI_NUM) { | 1127 | if (gsi < MAX_GSI_NUM) { |
1119 | if (gsi > 15) | 1128 | if (gsi > 15) |
1120 | gsi = pci_irq++; | 1129 | gsi = pci_irq++; |
1121 | #ifdef CONFIG_ACPI_BUS | 1130 | #ifdef CONFIG_ACPI_BUS |
1122 | /* | 1131 | /* |
1123 | * Don't assign IRQ used by ACPI SCI | 1132 | * Don't assign IRQ used by ACPI SCI |
1124 | */ | 1133 | */ |
1125 | if (gsi == acpi_fadt.sci_int) | 1134 | if (gsi == acpi_fadt.sci_int) |
1126 | gsi = pci_irq++; | 1135 | gsi = pci_irq++; |
1127 | #endif | 1136 | #endif |
1128 | gsi_to_irq[irq] = gsi; | 1137 | gsi_to_irq[irq] = gsi; |
1129 | } else { | 1138 | } else { |
1130 | printk(KERN_ERR "GSI %u is too high\n", gsi); | 1139 | printk(KERN_ERR "GSI %u is too high\n", gsi); |
1131 | return gsi; | 1140 | return gsi; |
1132 | } | 1141 | } |
1133 | } | 1142 | } |
1134 | 1143 | ||
1135 | io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, | 1144 | io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, |
1136 | edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1, | 1145 | edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1, |
1137 | active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1); | 1146 | active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1); |
1138 | return gsi; | 1147 | return gsi; |
1139 | } | 1148 | } |
1140 | 1149 | ||
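The pin_programmed bookkeeping in mp_register_gsi() tracks up to 128 I/O APIC pins (MP_MAX_IOAPIC_PIN is 127) in four 32-bit words: idx selects the word, bit selects the bit within it, and a pin that is already marked is not reprogrammed. A stand-alone sketch of that test-and-set pattern (the helper is hypothetical):

#include <stdint.h>

/* Returns non-zero if the pin was already marked as programmed. */
static int test_and_mark_pin(uint32_t programmed[4], unsigned int pin)
{
        unsigned int idx = pin / 32;
        uint32_t bit = 1u << (pin % 32);

        if (programmed[idx] & bit)
                return 1;       /* already programmed, skip */

        programmed[idx] |= bit;
        return 0;
}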
1141 | #endif /*CONFIG_X86_IO_APIC && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT)*/ | 1150 | #endif /*CONFIG_X86_IO_APIC && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT)*/ |
1142 | #endif /*CONFIG_ACPI_BOOT*/ | 1151 | #endif /*CONFIG_ACPI_BOOT*/ |
1143 | 1152 |
arch/i386/kernel/setup.c
1 | /* | 1 | /* |
2 | * linux/arch/i386/kernel/setup.c | 2 | * linux/arch/i386/kernel/setup.c |
3 | * | 3 | * |
4 | * Copyright (C) 1995 Linus Torvalds | 4 | * Copyright (C) 1995 Linus Torvalds |
5 | * | 5 | * |
6 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 | 6 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 |
7 | * | 7 | * |
8 | * Memory region support | 8 | * Memory region support |
9 | * David Parsons <orc@pell.chi.il.us>, July-August 1999 | 9 | * David Parsons <orc@pell.chi.il.us>, July-August 1999 |
10 | * | 10 | * |
11 | * Added E820 sanitization routine (removes overlapping memory regions); | 11 | * Added E820 sanitization routine (removes overlapping memory regions); |
12 | * Brian Moyle <bmoyle@mvista.com>, February 2001 | 12 | * Brian Moyle <bmoyle@mvista.com>, February 2001 |
13 | * | 13 | * |
14 | * Moved CPU detection code to cpu/${cpu}.c | 14 | * Moved CPU detection code to cpu/${cpu}.c |
15 | * Patrick Mochel <mochel@osdl.org>, March 2002 | 15 | * Patrick Mochel <mochel@osdl.org>, March 2002 |
16 | * | 16 | * |
17 | * Provisions for empty E820 memory regions (reported by certain BIOSes). | 17 | * Provisions for empty E820 memory regions (reported by certain BIOSes). |
18 | * Alex Achenbach <xela@slit.de>, December 2002. | 18 | * Alex Achenbach <xela@slit.de>, December 2002. |
19 | * | 19 | * |
20 | */ | 20 | */ |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * This file handles the architecture-dependent parts of initialization | 23 | * This file handles the architecture-dependent parts of initialization |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/config.h> | 26 | #include <linux/config.h> |
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/mmzone.h> | 29 | #include <linux/mmzone.h> |
30 | #include <linux/tty.h> | 30 | #include <linux/tty.h> |
31 | #include <linux/ioport.h> | 31 | #include <linux/ioport.h> |
32 | #include <linux/acpi.h> | 32 | #include <linux/acpi.h> |
33 | #include <linux/apm_bios.h> | 33 | #include <linux/apm_bios.h> |
34 | #include <linux/initrd.h> | 34 | #include <linux/initrd.h> |
35 | #include <linux/bootmem.h> | 35 | #include <linux/bootmem.h> |
36 | #include <linux/seq_file.h> | 36 | #include <linux/seq_file.h> |
37 | #include <linux/console.h> | 37 | #include <linux/console.h> |
38 | #include <linux/mca.h> | 38 | #include <linux/mca.h> |
39 | #include <linux/root_dev.h> | 39 | #include <linux/root_dev.h> |
40 | #include <linux/highmem.h> | 40 | #include <linux/highmem.h> |
41 | #include <linux/module.h> | 41 | #include <linux/module.h> |
42 | #include <linux/efi.h> | 42 | #include <linux/efi.h> |
43 | #include <linux/init.h> | 43 | #include <linux/init.h> |
44 | #include <linux/edd.h> | 44 | #include <linux/edd.h> |
45 | #include <linux/nodemask.h> | 45 | #include <linux/nodemask.h> |
46 | #include <linux/kexec.h> | 46 | #include <linux/kexec.h> |
47 | #include <linux/crash_dump.h> | 47 | #include <linux/crash_dump.h> |
48 | 48 | ||
49 | #include <video/edid.h> | 49 | #include <video/edid.h> |
50 | 50 | ||
51 | #include <asm/apic.h> | 51 | #include <asm/apic.h> |
52 | #include <asm/e820.h> | 52 | #include <asm/e820.h> |
53 | #include <asm/mpspec.h> | 53 | #include <asm/mpspec.h> |
54 | #include <asm/setup.h> | 54 | #include <asm/setup.h> |
55 | #include <asm/arch_hooks.h> | 55 | #include <asm/arch_hooks.h> |
56 | #include <asm/sections.h> | 56 | #include <asm/sections.h> |
57 | #include <asm/io_apic.h> | 57 | #include <asm/io_apic.h> |
58 | #include <asm/ist.h> | 58 | #include <asm/ist.h> |
59 | #include <asm/io.h> | 59 | #include <asm/io.h> |
60 | #include "setup_arch_pre.h" | 60 | #include "setup_arch_pre.h" |
61 | #include <bios_ebda.h> | 61 | #include <bios_ebda.h> |
62 | 62 | ||
63 | /* Forward Declaration. */ | 63 | /* Forward Declaration. */ |
64 | void __init find_max_pfn(void); | 64 | void __init find_max_pfn(void); |
65 | 65 | ||
66 | /* This value is set up by the early boot code to point to the value | 66 | /* This value is set up by the early boot code to point to the value |
67 | immediately after the boot time page tables. It contains a *physical* | 67 | immediately after the boot time page tables. It contains a *physical* |
68 | address, and must not be in the .bss segment! */ | 68 | address, and must not be in the .bss segment! */ |
69 | unsigned long init_pg_tables_end __initdata = ~0UL; | 69 | unsigned long init_pg_tables_end __initdata = ~0UL; |
70 | 70 | ||
71 | int disable_pse __devinitdata = 0; | 71 | int disable_pse __devinitdata = 0; |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * Machine setup.. | 74 | * Machine setup.. |
75 | */ | 75 | */ |
76 | 76 | ||
77 | #ifdef CONFIG_EFI | 77 | #ifdef CONFIG_EFI |
78 | int efi_enabled = 0; | 78 | int efi_enabled = 0; |
79 | EXPORT_SYMBOL(efi_enabled); | 79 | EXPORT_SYMBOL(efi_enabled); |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | /* cpu data as detected by the assembly code in head.S */ | 82 | /* cpu data as detected by the assembly code in head.S */ |
83 | struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; | 83 | struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; |
84 | /* common cpu data for all cpus */ | 84 | /* common cpu data for all cpus */ |
85 | struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; | 85 | struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; |
86 | EXPORT_SYMBOL(boot_cpu_data); | 86 | EXPORT_SYMBOL(boot_cpu_data); |
87 | 87 | ||
88 | unsigned long mmu_cr4_features; | 88 | unsigned long mmu_cr4_features; |
89 | 89 | ||
90 | #ifdef CONFIG_ACPI_INTERPRETER | 90 | #ifdef CONFIG_ACPI_INTERPRETER |
91 | int acpi_disabled = 0; | 91 | int acpi_disabled = 0; |
92 | #else | 92 | #else |
93 | int acpi_disabled = 1; | 93 | int acpi_disabled = 1; |
94 | #endif | 94 | #endif |
95 | EXPORT_SYMBOL(acpi_disabled); | 95 | EXPORT_SYMBOL(acpi_disabled); |
96 | 96 | ||
97 | #ifdef CONFIG_ACPI_BOOT | 97 | #ifdef CONFIG_ACPI_BOOT |
98 | int __initdata acpi_force = 0; | 98 | int __initdata acpi_force = 0; |
99 | extern acpi_interrupt_flags acpi_sci_flags; | 99 | extern acpi_interrupt_flags acpi_sci_flags; |
100 | #endif | 100 | #endif |
101 | 101 | ||
102 | /* for MCA, but anyone else can use it if they want */ | 102 | /* for MCA, but anyone else can use it if they want */ |
103 | unsigned int machine_id; | 103 | unsigned int machine_id; |
104 | #ifdef CONFIG_MCA | 104 | #ifdef CONFIG_MCA |
105 | EXPORT_SYMBOL(machine_id); | 105 | EXPORT_SYMBOL(machine_id); |
106 | #endif | 106 | #endif |
107 | unsigned int machine_submodel_id; | 107 | unsigned int machine_submodel_id; |
108 | unsigned int BIOS_revision; | 108 | unsigned int BIOS_revision; |
109 | unsigned int mca_pentium_flag; | 109 | unsigned int mca_pentium_flag; |
110 | 110 | ||
111 | /* For PCI or other memory-mapped resources */ | 111 | /* For PCI or other memory-mapped resources */ |
112 | unsigned long pci_mem_start = 0x10000000; | 112 | unsigned long pci_mem_start = 0x10000000; |
113 | #ifdef CONFIG_PCI | 113 | #ifdef CONFIG_PCI |
114 | EXPORT_SYMBOL(pci_mem_start); | 114 | EXPORT_SYMBOL(pci_mem_start); |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | /* Boot loader ID as an integer, for the benefit of proc_dointvec */ | 117 | /* Boot loader ID as an integer, for the benefit of proc_dointvec */ |
118 | int bootloader_type; | 118 | int bootloader_type; |
119 | 119 | ||
120 | /* user-defined highmem size */ | 120 | /* user-defined highmem size */ |
121 | static unsigned int highmem_pages = -1; | 121 | static unsigned int highmem_pages = -1; |
122 | 122 | ||
123 | /* | 123 | /* |
124 | * Setup options | 124 | * Setup options |
125 | */ | 125 | */ |
126 | struct drive_info_struct { char dummy[32]; } drive_info; | 126 | struct drive_info_struct { char dummy[32]; } drive_info; |
127 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \ | 127 | #if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \ |
128 | defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE) | 128 | defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE) |
129 | EXPORT_SYMBOL(drive_info); | 129 | EXPORT_SYMBOL(drive_info); |
130 | #endif | 130 | #endif |
131 | struct screen_info screen_info; | 131 | struct screen_info screen_info; |
132 | #ifdef CONFIG_VT | 132 | #ifdef CONFIG_VT |
133 | EXPORT_SYMBOL(screen_info); | 133 | EXPORT_SYMBOL(screen_info); |
134 | #endif | 134 | #endif |
135 | struct apm_info apm_info; | 135 | struct apm_info apm_info; |
136 | EXPORT_SYMBOL(apm_info); | 136 | EXPORT_SYMBOL(apm_info); |
137 | struct sys_desc_table_struct { | 137 | struct sys_desc_table_struct { |
138 | unsigned short length; | 138 | unsigned short length; |
139 | unsigned char table[0]; | 139 | unsigned char table[0]; |
140 | }; | 140 | }; |
141 | struct edid_info edid_info; | 141 | struct edid_info edid_info; |
142 | struct ist_info ist_info; | 142 | struct ist_info ist_info; |
143 | #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \ | 143 | #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \ |
144 | defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE) | 144 | defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE) |
145 | EXPORT_SYMBOL(ist_info); | 145 | EXPORT_SYMBOL(ist_info); |
146 | #endif | 146 | #endif |
147 | struct e820map e820; | 147 | struct e820map e820; |
148 | 148 | ||
149 | extern void early_cpu_init(void); | 149 | extern void early_cpu_init(void); |
150 | extern void dmi_scan_machine(void); | 150 | extern void dmi_scan_machine(void); |
151 | extern void generic_apic_probe(char *); | 151 | extern void generic_apic_probe(char *); |
152 | extern int root_mountflags; | 152 | extern int root_mountflags; |
153 | 153 | ||
154 | unsigned long saved_videomode; | 154 | unsigned long saved_videomode; |
155 | 155 | ||
156 | #define RAMDISK_IMAGE_START_MASK 0x07FF | 156 | #define RAMDISK_IMAGE_START_MASK 0x07FF |
157 | #define RAMDISK_PROMPT_FLAG 0x8000 | 157 | #define RAMDISK_PROMPT_FLAG 0x8000 |
158 | #define RAMDISK_LOAD_FLAG 0x4000 | 158 | #define RAMDISK_LOAD_FLAG 0x4000 |
159 | 159 | ||
160 | static char command_line[COMMAND_LINE_SIZE]; | 160 | static char command_line[COMMAND_LINE_SIZE]; |
161 | 161 | ||
162 | unsigned char __initdata boot_params[PARAM_SIZE]; | 162 | unsigned char __initdata boot_params[PARAM_SIZE]; |
163 | 163 | ||
164 | static struct resource data_resource = { | 164 | static struct resource data_resource = { |
165 | .name = "Kernel data", | 165 | .name = "Kernel data", |
166 | .start = 0, | 166 | .start = 0, |
167 | .end = 0, | 167 | .end = 0, |
168 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | 168 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM |
169 | }; | 169 | }; |
170 | 170 | ||
171 | static struct resource code_resource = { | 171 | static struct resource code_resource = { |
172 | .name = "Kernel code", | 172 | .name = "Kernel code", |
173 | .start = 0, | 173 | .start = 0, |
174 | .end = 0, | 174 | .end = 0, |
175 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | 175 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM |
176 | }; | 176 | }; |
177 | 177 | ||
178 | static struct resource system_rom_resource = { | 178 | static struct resource system_rom_resource = { |
179 | .name = "System ROM", | 179 | .name = "System ROM", |
180 | .start = 0xf0000, | 180 | .start = 0xf0000, |
181 | .end = 0xfffff, | 181 | .end = 0xfffff, |
182 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM | 182 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM |
183 | }; | 183 | }; |
184 | 184 | ||
185 | static struct resource extension_rom_resource = { | 185 | static struct resource extension_rom_resource = { |
186 | .name = "Extension ROM", | 186 | .name = "Extension ROM", |
187 | .start = 0xe0000, | 187 | .start = 0xe0000, |
188 | .end = 0xeffff, | 188 | .end = 0xeffff, |
189 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM | 189 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM |
190 | }; | 190 | }; |
191 | 191 | ||
192 | static struct resource adapter_rom_resources[] = { { | 192 | static struct resource adapter_rom_resources[] = { { |
193 | .name = "Adapter ROM", | 193 | .name = "Adapter ROM", |
194 | .start = 0xc8000, | 194 | .start = 0xc8000, |
195 | .end = 0, | 195 | .end = 0, |
196 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM | 196 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM |
197 | }, { | 197 | }, { |
198 | .name = "Adapter ROM", | 198 | .name = "Adapter ROM", |
199 | .start = 0, | 199 | .start = 0, |
200 | .end = 0, | 200 | .end = 0, |
201 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM | 201 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM |
202 | }, { | 202 | }, { |
203 | .name = "Adapter ROM", | 203 | .name = "Adapter ROM", |
204 | .start = 0, | 204 | .start = 0, |
205 | .end = 0, | 205 | .end = 0, |
206 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM | 206 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM |
207 | }, { | 207 | }, { |
208 | .name = "Adapter ROM", | 208 | .name = "Adapter ROM", |
209 | .start = 0, | 209 | .start = 0, |
210 | .end = 0, | 210 | .end = 0, |
211 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM | 211 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM |
212 | }, { | 212 | }, { |
213 | .name = "Adapter ROM", | 213 | .name = "Adapter ROM", |
214 | .start = 0, | 214 | .start = 0, |
215 | .end = 0, | 215 | .end = 0, |
216 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM | 216 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM |
217 | }, { | 217 | }, { |
218 | .name = "Adapter ROM", | 218 | .name = "Adapter ROM", |
219 | .start = 0, | 219 | .start = 0, |
220 | .end = 0, | 220 | .end = 0, |
221 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM | 221 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM |
222 | } }; | 222 | } }; |
223 | 223 | ||
224 | #define ADAPTER_ROM_RESOURCES \ | 224 | #define ADAPTER_ROM_RESOURCES \ |
225 | (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0]) | 225 | (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0]) |
226 | 226 | ||
227 | static struct resource video_rom_resource = { | 227 | static struct resource video_rom_resource = { |
228 | .name = "Video ROM", | 228 | .name = "Video ROM", |
229 | .start = 0xc0000, | 229 | .start = 0xc0000, |
230 | .end = 0xc7fff, | 230 | .end = 0xc7fff, |
231 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM | 231 | .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM |
232 | }; | 232 | }; |
233 | 233 | ||
234 | static struct resource video_ram_resource = { | 234 | static struct resource video_ram_resource = { |
235 | .name = "Video RAM area", | 235 | .name = "Video RAM area", |
236 | .start = 0xa0000, | 236 | .start = 0xa0000, |
237 | .end = 0xbffff, | 237 | .end = 0xbffff, |
238 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | 238 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM |
239 | }; | 239 | }; |
240 | 240 | ||
241 | static struct resource standard_io_resources[] = { { | 241 | static struct resource standard_io_resources[] = { { |
242 | .name = "dma1", | 242 | .name = "dma1", |
243 | .start = 0x0000, | 243 | .start = 0x0000, |
244 | .end = 0x001f, | 244 | .end = 0x001f, |
245 | .flags = IORESOURCE_BUSY | IORESOURCE_IO | 245 | .flags = IORESOURCE_BUSY | IORESOURCE_IO |
246 | }, { | 246 | }, { |
247 | .name = "pic1", | 247 | .name = "pic1", |
248 | .start = 0x0020, | 248 | .start = 0x0020, |
249 | .end = 0x0021, | 249 | .end = 0x0021, |
250 | .flags = IORESOURCE_BUSY | IORESOURCE_IO | 250 | .flags = IORESOURCE_BUSY | IORESOURCE_IO |
251 | }, { | 251 | }, { |
252 | .name = "timer0", | 252 | .name = "timer0", |
253 | .start = 0x0040, | 253 | .start = 0x0040, |
254 | .end = 0x0043, | 254 | .end = 0x0043, |
255 | .flags = IORESOURCE_BUSY | IORESOURCE_IO | 255 | .flags = IORESOURCE_BUSY | IORESOURCE_IO |
256 | }, { | 256 | }, { |
257 | .name = "timer1", | 257 | .name = "timer1", |
258 | .start = 0x0050, | 258 | .start = 0x0050, |
259 | .end = 0x0053, | 259 | .end = 0x0053, |
260 | .flags = IORESOURCE_BUSY | IORESOURCE_IO | 260 | .flags = IORESOURCE_BUSY | IORESOURCE_IO |
261 | }, { | 261 | }, { |
262 | .name = "keyboard", | 262 | .name = "keyboard", |
263 | .start = 0x0060, | 263 | .start = 0x0060, |
264 | .end = 0x006f, | 264 | .end = 0x006f, |
265 | .flags = IORESOURCE_BUSY | IORESOURCE_IO | 265 | .flags = IORESOURCE_BUSY | IORESOURCE_IO |
266 | }, { | 266 | }, { |
267 | .name = "dma page reg", | 267 | .name = "dma page reg", |
268 | .start = 0x0080, | 268 | .start = 0x0080, |
269 | .end = 0x008f, | 269 | .end = 0x008f, |
270 | .flags = IORESOURCE_BUSY | IORESOURCE_IO | 270 | .flags = IORESOURCE_BUSY | IORESOURCE_IO |
271 | }, { | 271 | }, { |
272 | .name = "pic2", | 272 | .name = "pic2", |
273 | .start = 0x00a0, | 273 | .start = 0x00a0, |
274 | .end = 0x00a1, | 274 | .end = 0x00a1, |
275 | .flags = IORESOURCE_BUSY | IORESOURCE_IO | 275 | .flags = IORESOURCE_BUSY | IORESOURCE_IO |
276 | }, { | 276 | }, { |
277 | .name = "dma2", | 277 | .name = "dma2", |
278 | .start = 0x00c0, | 278 | .start = 0x00c0, |
279 | .end = 0x00df, | 279 | .end = 0x00df, |
280 | .flags = IORESOURCE_BUSY | IORESOURCE_IO | 280 | .flags = IORESOURCE_BUSY | IORESOURCE_IO |
281 | }, { | 281 | }, { |
282 | .name = "fpu", | 282 | .name = "fpu", |
283 | .start = 0x00f0, | 283 | .start = 0x00f0, |
284 | .end = 0x00ff, | 284 | .end = 0x00ff, |
285 | .flags = IORESOURCE_BUSY | IORESOURCE_IO | 285 | .flags = IORESOURCE_BUSY | IORESOURCE_IO |
286 | } }; | 286 | } }; |
287 | 287 | ||
288 | #define STANDARD_IO_RESOURCES \ | 288 | #define STANDARD_IO_RESOURCES \ |
289 | (sizeof standard_io_resources / sizeof standard_io_resources[0]) | 289 | (sizeof standard_io_resources / sizeof standard_io_resources[0]) |
290 | 290 | ||
291 | #define romsignature(x) (*(unsigned short *)(x) == 0xaa55) | 291 | #define romsignature(x) (*(unsigned short *)(x) == 0xaa55) |
292 | 292 | ||
293 | static int __init romchecksum(unsigned char *rom, unsigned long length) | 293 | static int __init romchecksum(unsigned char *rom, unsigned long length) |
294 | { | 294 | { |
295 | unsigned char *p, sum = 0; | 295 | unsigned char *p, sum = 0; |
296 | 296 | ||
297 | for (p = rom; p < rom + length; p++) | 297 | for (p = rom; p < rom + length; p++) |
298 | sum += *p; | 298 | sum += *p; |
299 | return sum == 0; | 299 | return sum == 0; |
300 | } | 300 | } |
301 | 301 | ||
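romchecksum() above and probe_roms() below rely on the classic option-ROM layout: a 0xAA55 signature word at offset 0, a length byte at offset 2 counted in 512-byte units, and a byte-wise checksum of the whole image that must come out to zero. A self-contained sketch combining the signature, length and checksum checks (hypothetical helper, not the kernel's code):

#include <stddef.h>
#include <stdint.h>

/* Returns the ROM length in bytes if the image looks valid, else 0. */
static size_t option_rom_length(const uint8_t *rom)
{
        size_t len, i;
        uint8_t sum = 0;

        if (rom[0] != 0x55 || rom[1] != 0xAA)   /* 0xAA55, little-endian */
                return 0;

        len = (size_t)rom[2] * 512;             /* 512-byte units */
        if (len == 0)
                return 0;

        for (i = 0; i < len; i++)
                sum += rom[i];

        return (sum == 0) ? len : 0;
}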
302 | static void __init probe_roms(void) | 302 | static void __init probe_roms(void) |
303 | { | 303 | { |
304 | unsigned long start, length, upper; | 304 | unsigned long start, length, upper; |
305 | unsigned char *rom; | 305 | unsigned char *rom; |
306 | int i; | 306 | int i; |
307 | 307 | ||
308 | /* video rom */ | 308 | /* video rom */ |
309 | upper = adapter_rom_resources[0].start; | 309 | upper = adapter_rom_resources[0].start; |
310 | for (start = video_rom_resource.start; start < upper; start += 2048) { | 310 | for (start = video_rom_resource.start; start < upper; start += 2048) { |
311 | rom = isa_bus_to_virt(start); | 311 | rom = isa_bus_to_virt(start); |
312 | if (!romsignature(rom)) | 312 | if (!romsignature(rom)) |
313 | continue; | 313 | continue; |
314 | 314 | ||
315 | video_rom_resource.start = start; | 315 | video_rom_resource.start = start; |
316 | 316 | ||
317 | /* 0 < length <= 0x7f * 512, historically */ | 317 | /* 0 < length <= 0x7f * 512, historically */ |
318 | length = rom[2] * 512; | 318 | length = rom[2] * 512; |
319 | 319 | ||
320 | /* if checksum okay, trust length byte */ | 320 | /* if checksum okay, trust length byte */ |
321 | if (length && romchecksum(rom, length)) | 321 | if (length && romchecksum(rom, length)) |
322 | video_rom_resource.end = start + length - 1; | 322 | video_rom_resource.end = start + length - 1; |
323 | 323 | ||
324 | request_resource(&iomem_resource, &video_rom_resource); | 324 | request_resource(&iomem_resource, &video_rom_resource); |
325 | break; | 325 | break; |
326 | } | 326 | } |
327 | 327 | ||
328 | start = (video_rom_resource.end + 1 + 2047) & ~2047UL; | 328 | start = (video_rom_resource.end + 1 + 2047) & ~2047UL; |
329 | if (start < upper) | 329 | if (start < upper) |
330 | start = upper; | 330 | start = upper; |
331 | 331 | ||
332 | /* system rom */ | 332 | /* system rom */ |
333 | request_resource(&iomem_resource, &system_rom_resource); | 333 | request_resource(&iomem_resource, &system_rom_resource); |
334 | upper = system_rom_resource.start; | 334 | upper = system_rom_resource.start; |
335 | 335 | ||
336 | /* check for extension rom (ignore length byte!) */ | 336 | /* check for extension rom (ignore length byte!) */ |
337 | rom = isa_bus_to_virt(extension_rom_resource.start); | 337 | rom = isa_bus_to_virt(extension_rom_resource.start); |
338 | if (romsignature(rom)) { | 338 | if (romsignature(rom)) { |
339 | length = extension_rom_resource.end - extension_rom_resource.start + 1; | 339 | length = extension_rom_resource.end - extension_rom_resource.start + 1; |
340 | if (romchecksum(rom, length)) { | 340 | if (romchecksum(rom, length)) { |
341 | request_resource(&iomem_resource, &extension_rom_resource); | 341 | request_resource(&iomem_resource, &extension_rom_resource); |
342 | upper = extension_rom_resource.start; | 342 | upper = extension_rom_resource.start; |
343 | } | 343 | } |
344 | } | 344 | } |
345 | 345 | ||
346 | /* check for adapter roms on 2k boundaries */ | 346 | /* check for adapter roms on 2k boundaries */ |
347 | for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) { | 347 | for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) { |
348 | rom = isa_bus_to_virt(start); | 348 | rom = isa_bus_to_virt(start); |
349 | if (!romsignature(rom)) | 349 | if (!romsignature(rom)) |
350 | continue; | 350 | continue; |
351 | 351 | ||
352 | /* 0 < length <= 0x7f * 512, historically */ | 352 | /* 0 < length <= 0x7f * 512, historically */ |
353 | length = rom[2] * 512; | 353 | length = rom[2] * 512; |
354 | 354 | ||
355 | /* but accept any length that fits if checksum okay */ | 355 | /* but accept any length that fits if checksum okay */ |
356 | if (!length || start + length > upper || !romchecksum(rom, length)) | 356 | if (!length || start + length > upper || !romchecksum(rom, length)) |
357 | continue; | 357 | continue; |
358 | 358 | ||
359 | adapter_rom_resources[i].start = start; | 359 | adapter_rom_resources[i].start = start; |
360 | adapter_rom_resources[i].end = start + length - 1; | 360 | adapter_rom_resources[i].end = start + length - 1; |
361 | request_resource(&iomem_resource, &adapter_rom_resources[i]); | 361 | request_resource(&iomem_resource, &adapter_rom_resources[i]); |
362 | 362 | ||
363 | start = adapter_rom_resources[i++].end & ~2047UL; | 363 | start = adapter_rom_resources[i++].end & ~2047UL; |
364 | } | 364 | } |
365 | } | 365 | } |
366 | 366 | ||
367 | static void __init limit_regions(unsigned long long size) | 367 | static void __init limit_regions(unsigned long long size) |
368 | { | 368 | { |
369 | unsigned long long current_addr = 0; | 369 | unsigned long long current_addr = 0; |
370 | int i; | 370 | int i; |
371 | 371 | ||
372 | if (efi_enabled) { | 372 | if (efi_enabled) { |
373 | efi_memory_desc_t *md; | 373 | efi_memory_desc_t *md; |
374 | void *p; | 374 | void *p; |
375 | 375 | ||
376 | for (p = memmap.map, i = 0; p < memmap.map_end; | 376 | for (p = memmap.map, i = 0; p < memmap.map_end; |
377 | p += memmap.desc_size, i++) { | 377 | p += memmap.desc_size, i++) { |
378 | md = p; | 378 | md = p; |
379 | current_addr = md->phys_addr + (md->num_pages << 12); | 379 | current_addr = md->phys_addr + (md->num_pages << 12); |
380 | if (md->type == EFI_CONVENTIONAL_MEMORY) { | 380 | if (md->type == EFI_CONVENTIONAL_MEMORY) { |
381 | if (current_addr >= size) { | 381 | if (current_addr >= size) { |
382 | md->num_pages -= | 382 | md->num_pages -= |
383 | (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT); | 383 | (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT); |
384 | memmap.nr_map = i + 1; | 384 | memmap.nr_map = i + 1; |
385 | return; | 385 | return; |
386 | } | 386 | } |
387 | } | 387 | } |
388 | } | 388 | } |
389 | } | 389 | } |
390 | for (i = 0; i < e820.nr_map; i++) { | 390 | for (i = 0; i < e820.nr_map; i++) { |
391 | if (e820.map[i].type == E820_RAM) { | 391 | if (e820.map[i].type == E820_RAM) { |
392 | current_addr = e820.map[i].addr + e820.map[i].size; | 392 | current_addr = e820.map[i].addr + e820.map[i].size; |
393 | if (current_addr >= size) { | 393 | if (current_addr >= size) { |
394 | e820.map[i].size -= current_addr-size; | 394 | e820.map[i].size -= current_addr-size; |
395 | e820.nr_map = i + 1; | 395 | e820.nr_map = i + 1; |
396 | return; | 396 | return; |
397 | } | 397 | } |
398 | } | 398 | } |
399 | } | 399 | } |
400 | } | 400 | } |
401 | 401 | ||
402 | static void __init add_memory_region(unsigned long long start, | 402 | static void __init add_memory_region(unsigned long long start, |
403 | unsigned long long size, int type) | 403 | unsigned long long size, int type) |
404 | { | 404 | { |
405 | int x; | 405 | int x; |
406 | 406 | ||
407 | if (!efi_enabled) { | 407 | if (!efi_enabled) { |
408 | x = e820.nr_map; | 408 | x = e820.nr_map; |
409 | 409 | ||
410 | if (x == E820MAX) { | 410 | if (x == E820MAX) { |
411 | printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); | 411 | printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); |
412 | return; | 412 | return; |
413 | } | 413 | } |
414 | 414 | ||
415 | e820.map[x].addr = start; | 415 | e820.map[x].addr = start; |
416 | e820.map[x].size = size; | 416 | e820.map[x].size = size; |
417 | e820.map[x].type = type; | 417 | e820.map[x].type = type; |
418 | e820.nr_map++; | 418 | e820.nr_map++; |
419 | } | 419 | } |
420 | } /* add_memory_region */ | 420 | } /* add_memory_region */ |
421 | 421 | ||
422 | #define E820_DEBUG 1 | 422 | #define E820_DEBUG 1 |
423 | 423 | ||
424 | static void __init print_memory_map(char *who) | 424 | static void __init print_memory_map(char *who) |
425 | { | 425 | { |
426 | int i; | 426 | int i; |
427 | 427 | ||
428 | for (i = 0; i < e820.nr_map; i++) { | 428 | for (i = 0; i < e820.nr_map; i++) { |
429 | printk(" %s: %016Lx - %016Lx ", who, | 429 | printk(" %s: %016Lx - %016Lx ", who, |
430 | e820.map[i].addr, | 430 | e820.map[i].addr, |
431 | e820.map[i].addr + e820.map[i].size); | 431 | e820.map[i].addr + e820.map[i].size); |
432 | switch (e820.map[i].type) { | 432 | switch (e820.map[i].type) { |
433 | case E820_RAM: printk("(usable)\n"); | 433 | case E820_RAM: printk("(usable)\n"); |
434 | break; | 434 | break; |
435 | case E820_RESERVED: | 435 | case E820_RESERVED: |
436 | printk("(reserved)\n"); | 436 | printk("(reserved)\n"); |
437 | break; | 437 | break; |
438 | case E820_ACPI: | 438 | case E820_ACPI: |
439 | printk("(ACPI data)\n"); | 439 | printk("(ACPI data)\n"); |
440 | break; | 440 | break; |
441 | case E820_NVS: | 441 | case E820_NVS: |
442 | printk("(ACPI NVS)\n"); | 442 | printk("(ACPI NVS)\n"); |
443 | break; | 443 | break; |
444 | default: printk("type %lu\n", e820.map[i].type); | 444 | default: printk("type %lu\n", e820.map[i].type); |
445 | break; | 445 | break; |
446 | } | 446 | } |
447 | } | 447 | } |
448 | } | 448 | } |
449 | 449 | ||
450 | /* | 450 | /* |
451 | * Sanitize the BIOS e820 map. | 451 | * Sanitize the BIOS e820 map. |
452 | * | 452 | * |
453 | * Some e820 responses include overlapping entries. The following | 453 | * Some e820 responses include overlapping entries. The following |
454 | * replaces the original e820 map with a new one, removing overlaps. | 454 | * replaces the original e820 map with a new one, removing overlaps. |
455 | * | 455 | * |
456 | */ | 456 | */ |
457 | struct change_member { | 457 | struct change_member { |
458 | struct e820entry *pbios; /* pointer to original bios entry */ | 458 | struct e820entry *pbios; /* pointer to original bios entry */ |
459 | unsigned long long addr; /* address for this change point */ | 459 | unsigned long long addr; /* address for this change point */ |
460 | }; | 460 | }; |
461 | static struct change_member change_point_list[2*E820MAX] __initdata; | 461 | static struct change_member change_point_list[2*E820MAX] __initdata; |
462 | static struct change_member *change_point[2*E820MAX] __initdata; | 462 | static struct change_member *change_point[2*E820MAX] __initdata; |
463 | static struct e820entry *overlap_list[E820MAX] __initdata; | 463 | static struct e820entry *overlap_list[E820MAX] __initdata; |
464 | static struct e820entry new_bios[E820MAX] __initdata; | 464 | static struct e820entry new_bios[E820MAX] __initdata; |
465 | 465 | ||
466 | static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) | 466 | static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) |
467 | { | 467 | { |
468 | struct change_member *change_tmp; | 468 | struct change_member *change_tmp; |
469 | unsigned long current_type, last_type; | 469 | unsigned long current_type, last_type; |
470 | unsigned long long last_addr; | 470 | unsigned long long last_addr; |
471 | int chgidx, still_changing; | 471 | int chgidx, still_changing; |
472 | int overlap_entries; | 472 | int overlap_entries; |
473 | int new_bios_entry; | 473 | int new_bios_entry; |
474 | int old_nr, new_nr, chg_nr; | 474 | int old_nr, new_nr, chg_nr; |
475 | int i; | 475 | int i; |
476 | 476 | ||
477 | /* | 477 | /* |
478 | Visually we're performing the following (1,2,3,4 = memory types)... | 478 | Visually we're performing the following (1,2,3,4 = memory types)... |
479 | 479 | ||
480 | Sample memory map (w/overlaps): | 480 | Sample memory map (w/overlaps): |
481 | ____22__________________ | 481 | ____22__________________ |
482 | ______________________4_ | 482 | ______________________4_ |
483 | ____1111________________ | 483 | ____1111________________ |
484 | _44_____________________ | 484 | _44_____________________ |
485 | 11111111________________ | 485 | 11111111________________ |
486 | ____________________33__ | 486 | ____________________33__ |
487 | ___________44___________ | 487 | ___________44___________ |
488 | __________33333_________ | 488 | __________33333_________ |
489 | ______________22________ | 489 | ______________22________ |
490 | ___________________2222_ | 490 | ___________________2222_ |
491 | _________111111111______ | 491 | _________111111111______ |
492 | _____________________11_ | 492 | _____________________11_ |
493 | _________________4______ | 493 | _________________4______ |
494 | 494 | ||
495 | Sanitized equivalent (no overlap): | 495 | Sanitized equivalent (no overlap): |
496 | 1_______________________ | 496 | 1_______________________ |
497 | _44_____________________ | 497 | _44_____________________ |
498 | ___1____________________ | 498 | ___1____________________ |
499 | ____22__________________ | 499 | ____22__________________ |
500 | ______11________________ | 500 | ______11________________ |
501 | _________1______________ | 501 | _________1______________ |
502 | __________3_____________ | 502 | __________3_____________ |
503 | ___________44___________ | 503 | ___________44___________ |
504 | _____________33_________ | 504 | _____________33_________ |
505 | _______________2________ | 505 | _______________2________ |
506 | ________________1_______ | 506 | ________________1_______ |
507 | _________________4______ | 507 | _________________4______ |
508 | ___________________2____ | 508 | ___________________2____ |
509 | ____________________33__ | 509 | ____________________33__ |
510 | ______________________4_ | 510 | ______________________4_ |
511 | */ | 511 | */ |
512 | 512 | ||
513 | /* if there's only one memory region, don't bother */ | 513 | /* if there's only one memory region, don't bother */ |
514 | if (*pnr_map < 2) | 514 | if (*pnr_map < 2) |
515 | return -1; | 515 | return -1; |
516 | 516 | ||
517 | old_nr = *pnr_map; | 517 | old_nr = *pnr_map; |
518 | 518 | ||
519 | /* bail out if we find any unreasonable addresses in bios map */ | 519 | /* bail out if we find any unreasonable addresses in bios map */ |
520 | for (i=0; i<old_nr; i++) | 520 | for (i=0; i<old_nr; i++) |
521 | if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) | 521 | if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) |
522 | return -1; | 522 | return -1; |
523 | 523 | ||
524 | /* create pointers for initial change-point information (for sorting) */ | 524 | /* create pointers for initial change-point information (for sorting) */ |
525 | for (i=0; i < 2*old_nr; i++) | 525 | for (i=0; i < 2*old_nr; i++) |
526 | change_point[i] = &change_point_list[i]; | 526 | change_point[i] = &change_point_list[i]; |
527 | 527 | ||
528 | /* record all known change-points (starting and ending addresses), | 528 | /* record all known change-points (starting and ending addresses), |
529 | omitting those that are for empty memory regions */ | 529 | omitting those that are for empty memory regions */ |
530 | chgidx = 0; | 530 | chgidx = 0; |
531 | for (i=0; i < old_nr; i++) { | 531 | for (i=0; i < old_nr; i++) { |
532 | if (biosmap[i].size != 0) { | 532 | if (biosmap[i].size != 0) { |
533 | change_point[chgidx]->addr = biosmap[i].addr; | 533 | change_point[chgidx]->addr = biosmap[i].addr; |
534 | change_point[chgidx++]->pbios = &biosmap[i]; | 534 | change_point[chgidx++]->pbios = &biosmap[i]; |
535 | change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size; | 535 | change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size; |
536 | change_point[chgidx++]->pbios = &biosmap[i]; | 536 | change_point[chgidx++]->pbios = &biosmap[i]; |
537 | } | 537 | } |
538 | } | 538 | } |
539 | chg_nr = chgidx; /* true number of change-points */ | 539 | chg_nr = chgidx; /* true number of change-points */ |
540 | 540 | ||
541 | /* sort change-point list by memory addresses (low -> high) */ | 541 | /* sort change-point list by memory addresses (low -> high) */ |
542 | still_changing = 1; | 542 | still_changing = 1; |
543 | while (still_changing) { | 543 | while (still_changing) { |
544 | still_changing = 0; | 544 | still_changing = 0; |
545 | for (i=1; i < chg_nr; i++) { | 545 | for (i=1; i < chg_nr; i++) { |
546 | /* if <current_addr> > <last_addr>, swap */ | 546 | /* if <current_addr> > <last_addr>, swap */ |
547 | /* or, if current=<start_addr> & last=<end_addr>, swap */ | 547 | /* or, if current=<start_addr> & last=<end_addr>, swap */ |
548 | if ((change_point[i]->addr < change_point[i-1]->addr) || | 548 | if ((change_point[i]->addr < change_point[i-1]->addr) || |
549 | ((change_point[i]->addr == change_point[i-1]->addr) && | 549 | ((change_point[i]->addr == change_point[i-1]->addr) && |
550 | (change_point[i]->addr == change_point[i]->pbios->addr) && | 550 | (change_point[i]->addr == change_point[i]->pbios->addr) && |
551 | (change_point[i-1]->addr != change_point[i-1]->pbios->addr)) | 551 | (change_point[i-1]->addr != change_point[i-1]->pbios->addr)) |
552 | ) | 552 | ) |
553 | { | 553 | { |
554 | change_tmp = change_point[i]; | 554 | change_tmp = change_point[i]; |
555 | change_point[i] = change_point[i-1]; | 555 | change_point[i] = change_point[i-1]; |
556 | change_point[i-1] = change_tmp; | 556 | change_point[i-1] = change_tmp; |
557 | still_changing=1; | 557 | still_changing=1; |
558 | } | 558 | } |
559 | } | 559 | } |
560 | } | 560 | } |
561 | 561 | ||
562 | /* create a new bios memory map, removing overlaps */ | 562 | /* create a new bios memory map, removing overlaps */ |
563 | overlap_entries=0; /* number of entries in the overlap table */ | 563 | overlap_entries=0; /* number of entries in the overlap table */ |
564 | new_bios_entry=0; /* index for creating new bios map entries */ | 564 | new_bios_entry=0; /* index for creating new bios map entries */ |
565 | last_type = 0; /* start with undefined memory type */ | 565 | last_type = 0; /* start with undefined memory type */ |
566 | last_addr = 0; /* start with 0 as last starting address */ | 566 | last_addr = 0; /* start with 0 as last starting address */ |
567 | /* loop through change-points, determining effect on the new bios map */ | 567 | /* loop through change-points, determining effect on the new bios map */ |
568 | for (chgidx=0; chgidx < chg_nr; chgidx++) | 568 | for (chgidx=0; chgidx < chg_nr; chgidx++) |
569 | { | 569 | { |
570 | /* keep track of all overlapping bios entries */ | 570 | /* keep track of all overlapping bios entries */ |
571 | if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr) | 571 | if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr) |
572 | { | 572 | { |
573 | /* add map entry to overlap list (> 1 entry implies an overlap) */ | 573 | /* add map entry to overlap list (> 1 entry implies an overlap) */ |
574 | overlap_list[overlap_entries++]=change_point[chgidx]->pbios; | 574 | overlap_list[overlap_entries++]=change_point[chgidx]->pbios; |
575 | } | 575 | } |
576 | else | 576 | else |
577 | { | 577 | { |
578 | /* remove entry from list (order independent, so swap with last) */ | 578 | /* remove entry from list (order independent, so swap with last) */ |
579 | for (i=0; i<overlap_entries; i++) | 579 | for (i=0; i<overlap_entries; i++) |
580 | { | 580 | { |
581 | if (overlap_list[i] == change_point[chgidx]->pbios) | 581 | if (overlap_list[i] == change_point[chgidx]->pbios) |
582 | overlap_list[i] = overlap_list[overlap_entries-1]; | 582 | overlap_list[i] = overlap_list[overlap_entries-1]; |
583 | } | 583 | } |
584 | overlap_entries--; | 584 | overlap_entries--; |
585 | } | 585 | } |
586 | /* if there are overlapping entries, decide which "type" to use */ | 586 | /* if there are overlapping entries, decide which "type" to use */ |
587 | /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */ | 587 | /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */ |
588 | current_type = 0; | 588 | current_type = 0; |
589 | for (i=0; i<overlap_entries; i++) | 589 | for (i=0; i<overlap_entries; i++) |
590 | if (overlap_list[i]->type > current_type) | 590 | if (overlap_list[i]->type > current_type) |
591 | current_type = overlap_list[i]->type; | 591 | current_type = overlap_list[i]->type; |
592 | /* continue building up new bios map based on this information */ | 592 | /* continue building up new bios map based on this information */ |
593 | if (current_type != last_type) { | 593 | if (current_type != last_type) { |
594 | if (last_type != 0) { | 594 | if (last_type != 0) { |
595 | new_bios[new_bios_entry].size = | 595 | new_bios[new_bios_entry].size = |
596 | change_point[chgidx]->addr - last_addr; | 596 | change_point[chgidx]->addr - last_addr; |
597 | /* move forward only if the new size was non-zero */ | 597 | /* move forward only if the new size was non-zero */ |
598 | if (new_bios[new_bios_entry].size != 0) | 598 | if (new_bios[new_bios_entry].size != 0) |
599 | if (++new_bios_entry >= E820MAX) | 599 | if (++new_bios_entry >= E820MAX) |
600 | break; /* no more space left for new bios entries */ | 600 | break; /* no more space left for new bios entries */ |
601 | } | 601 | } |
602 | if (current_type != 0) { | 602 | if (current_type != 0) { |
603 | new_bios[new_bios_entry].addr = change_point[chgidx]->addr; | 603 | new_bios[new_bios_entry].addr = change_point[chgidx]->addr; |
604 | new_bios[new_bios_entry].type = current_type; | 604 | new_bios[new_bios_entry].type = current_type; |
605 | last_addr=change_point[chgidx]->addr; | 605 | last_addr=change_point[chgidx]->addr; |
606 | } | 606 | } |
607 | last_type = current_type; | 607 | last_type = current_type; |
608 | } | 608 | } |
609 | } | 609 | } |
610 | new_nr = new_bios_entry; /* retain count for new bios entries */ | 610 | new_nr = new_bios_entry; /* retain count for new bios entries */ |
611 | 611 | ||
612 | /* copy new bios mapping into original location */ | 612 | /* copy new bios mapping into original location */ |
613 | memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry)); | 613 | memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry)); |
614 | *pnr_map = new_nr; | 614 | *pnr_map = new_nr; |
615 | 615 | ||
616 | return 0; | 616 | return 0; |
617 | } | 617 | } |
618 | 618 | ||
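The precedence rule at the heart of sanitize_e820_map() is that wherever entries overlap, the largest type value wins, so reserved regions beat usable RAM. A small worked example of just that rule, separate from the change-point bookkeeping above; the entries, the type_at() helper and the addresses probed are made up for illustration:

#include <stdio.h>

struct entry { unsigned long long addr, size; unsigned type; };

/* type the sanitized map would report at a given address */
static unsigned type_at(const struct entry *map, int nr, unsigned long long a)
{
	unsigned t = 0;
	int i;

	for (i = 0; i < nr; i++)
		if (a >= map[i].addr && a < map[i].addr + map[i].size &&
		    map[i].type > t)
			t = map[i].type;
	return t;
}

int main(void)
{
	/* overlapping input: 0-8M usable (1), 4M-6M reserved (2) */
	struct entry map[] = {
		{ 0x000000, 0x800000, 1 },
		{ 0x400000, 0x200000, 2 },
	};

	/* sanitized equivalent: 0-4M type 1, 4M-6M type 2, 6M-8M type 1 */
	printf("type at 2M: %u, at 5M: %u, at 7M: %u\n",
	       type_at(map, 2, 0x200000),
	       type_at(map, 2, 0x500000),
	       type_at(map, 2, 0x700000));
	return 0;
}

Taking the maximum type means an overlap can only make memory less usable, never promote a reserved range back to RAM.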
619 | /* | 619 | /* |
620 | * Copy the BIOS e820 map into a safe place. | 620 | * Copy the BIOS e820 map into a safe place. |
621 | * | 621 | * |
622 | * Sanity-check it while we're at it.. | 622 | * Sanity-check it while we're at it.. |
623 | * | 623 | * |
624 | * If we're lucky and live on a modern system, the setup code | 624 | * If we're lucky and live on a modern system, the setup code |
625 | * will have given us a memory map that we can use to properly | 625 | * will have given us a memory map that we can use to properly |
626 | * set up memory. If we aren't, we'll fake a memory map. | 626 | * set up memory. If we aren't, we'll fake a memory map. |
627 | * | 627 | * |
628 | * We check to see that the memory map contains at least 2 elements | 628 | * We check to see that the memory map contains at least 2 elements |
629 | * before we'll use it, because the detection code in setup.S may | 629 | * before we'll use it, because the detection code in setup.S may |
630 | * not be perfect and most every PC known to man has two memory | 630 | * not be perfect and most every PC known to man has two memory |
631 | * regions: one from 0 to 640k, and one from 1mb up. (The IBM | 631 | * regions: one from 0 to 640k, and one from 1mb up. (The IBM |
632 | * thinkpad 560x, for example, does not cooperate with the memory | 632 | * thinkpad 560x, for example, does not cooperate with the memory |
633 | * detection code.) | 633 | * detection code.) |
634 | */ | 634 | */ |
635 | static int __init copy_e820_map(struct e820entry * biosmap, int nr_map) | 635 | static int __init copy_e820_map(struct e820entry * biosmap, int nr_map) |
636 | { | 636 | { |
637 | /* Only one memory region (or negative)? Ignore it */ | 637 | /* Only one memory region (or negative)? Ignore it */ |
638 | if (nr_map < 2) | 638 | if (nr_map < 2) |
639 | return -1; | 639 | return -1; |
640 | 640 | ||
641 | do { | 641 | do { |
642 | unsigned long long start = biosmap->addr; | 642 | unsigned long long start = biosmap->addr; |
643 | unsigned long long size = biosmap->size; | 643 | unsigned long long size = biosmap->size; |
644 | unsigned long long end = start + size; | 644 | unsigned long long end = start + size; |
645 | unsigned long type = biosmap->type; | 645 | unsigned long type = biosmap->type; |
646 | 646 | ||
647 | /* Overflow in 64 bits? Ignore the memory map. */ | 647 | /* Overflow in 64 bits? Ignore the memory map. */ |
648 | if (start > end) | 648 | if (start > end) |
649 | return -1; | 649 | return -1; |
650 | 650 | ||
651 | /* | 651 | /* |
652 | * Some BIOSes claim RAM in the 640k - 1M region. | 652 | * Some BIOSes claim RAM in the 640k - 1M region. |
653 | * Not right. Fix it up. | 653 | * Not right. Fix it up. |
654 | */ | 654 | */ |
655 | if (type == E820_RAM) { | 655 | if (type == E820_RAM) { |
656 | if (start < 0x100000ULL && end > 0xA0000ULL) { | 656 | if (start < 0x100000ULL && end > 0xA0000ULL) { |
657 | if (start < 0xA0000ULL) | 657 | if (start < 0xA0000ULL) |
658 | add_memory_region(start, 0xA0000ULL-start, type); | 658 | add_memory_region(start, 0xA0000ULL-start, type); |
659 | if (end <= 0x100000ULL) | 659 | if (end <= 0x100000ULL) |
660 | continue; | 660 | continue; |
661 | start = 0x100000ULL; | 661 | start = 0x100000ULL; |
662 | size = end - start; | 662 | size = end - start; |
663 | } | 663 | } |
664 | } | 664 | } |
665 | add_memory_region(start, size, type); | 665 | add_memory_region(start, size, type); |
666 | } while (biosmap++,--nr_map); | 666 | } while (biosmap++,--nr_map); |
667 | return 0; | 667 | return 0; |
668 | } | 668 | } |
669 | 669 | ||
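The fixup in copy_e820_map() splits any RAM entry that straddles the 640k-1M hole: the part below 640k is kept, the 640k-1M slice is dropped, and anything at or above 1M becomes its own region. A minimal sketch of just that clipping step, with add_memory_region() replaced by a hypothetical add_range() that prints the result:

#include <stdio.h>

#define LOW_HOLE_START	0xA0000ULL	/* 640k */
#define LOW_HOLE_END	0x100000ULL	/* 1M   */

static void add_range(unsigned long long start, unsigned long long size)
{
	printf("  RAM %#llx - %#llx\n", start, start + size);
}

static void add_ram_clipped(unsigned long long start, unsigned long long size)
{
	unsigned long long end = start + size;

	if (start < LOW_HOLE_END && end > LOW_HOLE_START) {
		if (start < LOW_HOLE_START)
			add_range(start, LOW_HOLE_START - start);	/* below 640k */
		if (end <= LOW_HOLE_END)
			return;			/* nothing left above 1M */
		start = LOW_HOLE_END;
		size = end - start;
	}
	add_range(start, size);
}

int main(void)
{
	/* a BIOS entry claiming 0 - 2M as RAM straight across the hole */
	add_ram_clipped(0, 0x200000ULL);
	return 0;
}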
670 | #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) | 670 | #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) |
671 | struct edd edd; | 671 | struct edd edd; |
672 | #ifdef CONFIG_EDD_MODULE | 672 | #ifdef CONFIG_EDD_MODULE |
673 | EXPORT_SYMBOL(edd); | 673 | EXPORT_SYMBOL(edd); |
674 | #endif | 674 | #endif |
675 | /** | 675 | /** |
676 | * copy_edd() - Copy the BIOS EDD information | 676 | * copy_edd() - Copy the BIOS EDD information |
677 | * from boot_params into a safe place. | 677 | * from boot_params into a safe place. |
678 | * | 678 | * |
679 | */ | 679 | */ |
680 | static inline void copy_edd(void) | 680 | static inline void copy_edd(void) |
681 | { | 681 | { |
682 | memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature)); | 682 | memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature)); |
683 | memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info)); | 683 | memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info)); |
684 | edd.mbr_signature_nr = EDD_MBR_SIG_NR; | 684 | edd.mbr_signature_nr = EDD_MBR_SIG_NR; |
685 | edd.edd_info_nr = EDD_NR; | 685 | edd.edd_info_nr = EDD_NR; |
686 | } | 686 | } |
687 | #else | 687 | #else |
688 | static inline void copy_edd(void) | 688 | static inline void copy_edd(void) |
689 | { | 689 | { |
690 | } | 690 | } |
691 | #endif | 691 | #endif |
692 | 692 | ||
693 | /* | 693 | /* |
694 | * Do NOT EVER look at the BIOS memory size location. | 694 | * Do NOT EVER look at the BIOS memory size location. |
695 | * It does not work on many machines. | 695 | * It does not work on many machines. |
696 | */ | 696 | */ |
697 | #define LOWMEMSIZE() (0x9f000) | 697 | #define LOWMEMSIZE() (0x9f000) |
698 | 698 | ||
699 | static void __init parse_cmdline_early (char ** cmdline_p) | 699 | static void __init parse_cmdline_early (char ** cmdline_p) |
700 | { | 700 | { |
701 | char c = ' ', *to = command_line, *from = saved_command_line; | 701 | char c = ' ', *to = command_line, *from = saved_command_line; |
702 | int len = 0; | 702 | int len = 0; |
703 | int userdef = 0; | 703 | int userdef = 0; |
704 | 704 | ||
705 | /* Save unparsed command line copy for /proc/cmdline */ | 705 | /* Save unparsed command line copy for /proc/cmdline */ |
706 | saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; | 706 | saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; |
707 | 707 | ||
708 | for (;;) { | 708 | for (;;) { |
709 | if (c != ' ') | 709 | if (c != ' ') |
710 | goto next_char; | 710 | goto next_char; |
711 | /* | 711 | /* |
712 | * "mem=nopentium" disables the 4MB page tables. | 712 | * "mem=nopentium" disables the 4MB page tables. |
713 | * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM | 713 | * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM |
714 | * to <mem>, overriding the bios size. | 714 | * to <mem>, overriding the bios size. |
715 | * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from | 715 | * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from |
716 | * <start> to <start>+<mem>, overriding the bios size. | 716 | * <start> to <start>+<mem>, overriding the bios size. |
717 | * | 717 | * |
718 | * HPA tells me bootloaders need to parse mem=, so no new | 718 | * HPA tells me bootloaders need to parse mem=, so no new |
719 | * option should be mem= [also see Documentation/i386/boot.txt] | 719 | * option should be mem= [also see Documentation/i386/boot.txt] |
720 | */ | 720 | */ |
721 | if (!memcmp(from, "mem=", 4)) { | 721 | if (!memcmp(from, "mem=", 4)) { |
722 | if (to != command_line) | 722 | if (to != command_line) |
723 | to--; | 723 | to--; |
724 | if (!memcmp(from+4, "nopentium", 9)) { | 724 | if (!memcmp(from+4, "nopentium", 9)) { |
725 | from += 9+4; | 725 | from += 9+4; |
726 | clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); | 726 | clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); |
727 | disable_pse = 1; | 727 | disable_pse = 1; |
728 | } else { | 728 | } else { |
729 | /* If the user specifies memory size, we | 729 | /* If the user specifies memory size, we |
730 | * limit the BIOS-provided memory map to | 730 | * limit the BIOS-provided memory map to |
731 | * that size. exactmap can be used to specify | 731 | * that size. exactmap can be used to specify |
732 | * the exact map. mem=number can be used to | 732 | * the exact map. mem=number can be used to |
733 | * trim the existing memory map. | 733 | * trim the existing memory map. |
734 | */ | 734 | */ |
735 | unsigned long long mem_size; | 735 | unsigned long long mem_size; |
736 | 736 | ||
737 | mem_size = memparse(from+4, &from); | 737 | mem_size = memparse(from+4, &from); |
738 | limit_regions(mem_size); | 738 | limit_regions(mem_size); |
739 | userdef=1; | 739 | userdef=1; |
740 | } | 740 | } |
741 | } | 741 | } |
742 | 742 | ||
743 | else if (!memcmp(from, "memmap=", 7)) { | 743 | else if (!memcmp(from, "memmap=", 7)) { |
744 | if (to != command_line) | 744 | if (to != command_line) |
745 | to--; | 745 | to--; |
746 | if (!memcmp(from+7, "exactmap", 8)) { | 746 | if (!memcmp(from+7, "exactmap", 8)) { |
747 | #ifdef CONFIG_CRASH_DUMP | 747 | #ifdef CONFIG_CRASH_DUMP |
748 | /* If we are doing a crash dump, we | 748 | /* If we are doing a crash dump, we |
749 | * still need to know the real mem | 749 | * still need to know the real mem |
750 | * size before original memory map is | 750 | * size before original memory map is |
751 | * reset. | 751 | * reset. |
752 | */ | 752 | */ |
753 | find_max_pfn(); | 753 | find_max_pfn(); |
754 | saved_max_pfn = max_pfn; | 754 | saved_max_pfn = max_pfn; |
755 | #endif | 755 | #endif |
756 | from += 8+7; | 756 | from += 8+7; |
757 | e820.nr_map = 0; | 757 | e820.nr_map = 0; |
758 | userdef = 1; | 758 | userdef = 1; |
759 | } else { | 759 | } else { |
760 | /* If the user specifies memory size, we | 760 | /* If the user specifies memory size, we |
761 | * limit the BIOS-provided memory map to | 761 | * limit the BIOS-provided memory map to |
762 | * that size. exactmap can be used to specify | 762 | * that size. exactmap can be used to specify |
763 | * the exact map. mem=number can be used to | 763 | * the exact map. mem=number can be used to |
764 | * trim the existing memory map. | 764 | * trim the existing memory map. |
765 | */ | 765 | */ |
766 | unsigned long long start_at, mem_size; | 766 | unsigned long long start_at, mem_size; |
767 | 767 | ||
768 | mem_size = memparse(from+7, &from); | 768 | mem_size = memparse(from+7, &from); |
769 | if (*from == '@') { | 769 | if (*from == '@') { |
770 | start_at = memparse(from+1, &from); | 770 | start_at = memparse(from+1, &from); |
771 | add_memory_region(start_at, mem_size, E820_RAM); | 771 | add_memory_region(start_at, mem_size, E820_RAM); |
772 | } else if (*from == '#') { | 772 | } else if (*from == '#') { |
773 | start_at = memparse(from+1, &from); | 773 | start_at = memparse(from+1, &from); |
774 | add_memory_region(start_at, mem_size, E820_ACPI); | 774 | add_memory_region(start_at, mem_size, E820_ACPI); |
775 | } else if (*from == '$') { | 775 | } else if (*from == '$') { |
776 | start_at = memparse(from+1, &from); | 776 | start_at = memparse(from+1, &from); |
777 | add_memory_region(start_at, mem_size, E820_RESERVED); | 777 | add_memory_region(start_at, mem_size, E820_RESERVED); |
778 | } else { | 778 | } else { |
779 | limit_regions(mem_size); | 779 | limit_regions(mem_size); |
780 | userdef=1; | 780 | userdef=1; |
781 | } | 781 | } |
782 | } | 782 | } |
783 | } | 783 | } |
784 | 784 | ||
785 | else if (!memcmp(from, "noexec=", 7)) | 785 | else if (!memcmp(from, "noexec=", 7)) |
786 | noexec_setup(from + 7); | 786 | noexec_setup(from + 7); |
787 | 787 | ||
788 | 788 | ||
789 | #ifdef CONFIG_X86_SMP | 789 | #ifdef CONFIG_X86_SMP |
790 | /* | 790 | /* |
791 | * If the BIOS enumerates physical processors before logical, | 791 | * If the BIOS enumerates physical processors before logical, |
792 | * maxcpus=N at enumeration-time can be used to disable HT. | 792 | * maxcpus=N at enumeration-time can be used to disable HT. |
793 | */ | 793 | */ |
794 | else if (!memcmp(from, "maxcpus=", 8)) { | 794 | else if (!memcmp(from, "maxcpus=", 8)) { |
795 | extern unsigned int maxcpus; | 795 | extern unsigned int maxcpus; |
796 | 796 | ||
797 | maxcpus = simple_strtoul(from + 8, NULL, 0); | 797 | maxcpus = simple_strtoul(from + 8, NULL, 0); |
798 | } | 798 | } |
799 | #endif | 799 | #endif |
800 | 800 | ||
801 | #ifdef CONFIG_ACPI_BOOT | 801 | #ifdef CONFIG_ACPI_BOOT |
802 | /* "acpi=off" disables both ACPI table parsing and interpreter */ | 802 | /* "acpi=off" disables both ACPI table parsing and interpreter */ |
803 | else if (!memcmp(from, "acpi=off", 8)) { | 803 | else if (!memcmp(from, "acpi=off", 8)) { |
804 | disable_acpi(); | 804 | disable_acpi(); |
805 | } | 805 | } |
806 | 806 | ||
807 | /* acpi=force to over-ride black-list */ | 807 | /* acpi=force to over-ride black-list */ |
808 | else if (!memcmp(from, "acpi=force", 10)) { | 808 | else if (!memcmp(from, "acpi=force", 10)) { |
809 | acpi_force = 1; | 809 | acpi_force = 1; |
810 | acpi_ht = 1; | 810 | acpi_ht = 1; |
811 | acpi_disabled = 0; | 811 | acpi_disabled = 0; |
812 | } | 812 | } |
813 | 813 | ||
814 | /* acpi=strict disables out-of-spec workarounds */ | 814 | /* acpi=strict disables out-of-spec workarounds */ |
815 | else if (!memcmp(from, "acpi=strict", 11)) { | 815 | else if (!memcmp(from, "acpi=strict", 11)) { |
816 | acpi_strict = 1; | 816 | acpi_strict = 1; |
817 | } | 817 | } |
818 | 818 | ||
819 | /* Limit ACPI just to boot-time to enable HT */ | 819 | /* Limit ACPI just to boot-time to enable HT */ |
820 | else if (!memcmp(from, "acpi=ht", 7)) { | 820 | else if (!memcmp(from, "acpi=ht", 7)) { |
821 | if (!acpi_force) | 821 | if (!acpi_force) |
822 | disable_acpi(); | 822 | disable_acpi(); |
823 | acpi_ht = 1; | 823 | acpi_ht = 1; |
824 | } | 824 | } |
825 | 825 | ||
826 | /* "pci=noacpi" disables ACPI IRQ routing and PCI scan */ | 826 | /* "pci=noacpi" disables ACPI IRQ routing and PCI scan */ |
827 | else if (!memcmp(from, "pci=noacpi", 10)) { | 827 | else if (!memcmp(from, "pci=noacpi", 10)) { |
828 | acpi_disable_pci(); | 828 | acpi_disable_pci(); |
829 | } | 829 | } |
830 | /* "acpi=noirq" disables ACPI interrupt routing */ | 830 | /* "acpi=noirq" disables ACPI interrupt routing */ |
831 | else if (!memcmp(from, "acpi=noirq", 10)) { | 831 | else if (!memcmp(from, "acpi=noirq", 10)) { |
832 | acpi_noirq_set(); | 832 | acpi_noirq_set(); |
833 | } | 833 | } |
834 | 834 | ||
835 | else if (!memcmp(from, "acpi_sci=edge", 13)) | 835 | else if (!memcmp(from, "acpi_sci=edge", 13)) |
836 | acpi_sci_flags.trigger = 1; | 836 | acpi_sci_flags.trigger = 1; |
837 | 837 | ||
838 | else if (!memcmp(from, "acpi_sci=level", 14)) | 838 | else if (!memcmp(from, "acpi_sci=level", 14)) |
839 | acpi_sci_flags.trigger = 3; | 839 | acpi_sci_flags.trigger = 3; |
840 | 840 | ||
841 | else if (!memcmp(from, "acpi_sci=high", 13)) | 841 | else if (!memcmp(from, "acpi_sci=high", 13)) |
842 | acpi_sci_flags.polarity = 1; | 842 | acpi_sci_flags.polarity = 1; |
843 | 843 | ||
844 | else if (!memcmp(from, "acpi_sci=low", 12)) | 844 | else if (!memcmp(from, "acpi_sci=low", 12)) |
845 | acpi_sci_flags.polarity = 3; | 845 | acpi_sci_flags.polarity = 3; |
846 | 846 | ||
847 | #ifdef CONFIG_X86_IO_APIC | 847 | #ifdef CONFIG_X86_IO_APIC |
848 | else if (!memcmp(from, "acpi_skip_timer_override", 24)) | 848 | else if (!memcmp(from, "acpi_skip_timer_override", 24)) |
849 | acpi_skip_timer_override = 1; | 849 | acpi_skip_timer_override = 1; |
850 | #endif | 850 | #endif |
851 | 851 | ||
852 | #ifdef CONFIG_X86_LOCAL_APIC | 852 | #ifdef CONFIG_X86_LOCAL_APIC |
853 | /* disable IO-APIC */ | 853 | /* disable IO-APIC */ |
854 | else if (!memcmp(from, "noapic", 6)) | 854 | else if (!memcmp(from, "noapic", 6)) |
855 | disable_ioapic_setup(); | 855 | disable_ioapic_setup(); |
856 | #endif /* CONFIG_X86_LOCAL_APIC */ | 856 | #endif /* CONFIG_X86_LOCAL_APIC */ |
857 | #endif /* CONFIG_ACPI_BOOT */ | 857 | #endif /* CONFIG_ACPI_BOOT */ |
858 | 858 | ||
859 | #ifdef CONFIG_X86_LOCAL_APIC | 859 | #ifdef CONFIG_X86_LOCAL_APIC |
860 | /* enable local APIC */ | 860 | /* enable local APIC */ |
861 | else if (!memcmp(from, "lapic", 5)) | 861 | else if (!memcmp(from, "lapic", 5)) |
862 | lapic_enable(); | 862 | lapic_enable(); |
863 | 863 | ||
864 | /* disable local APIC */ | 864 | /* disable local APIC */ |
865 | else if (!memcmp(from, "nolapic", 6)) | 865 | else if (!memcmp(from, "nolapic", 6)) |
866 | lapic_disable(); | 866 | lapic_disable(); |
867 | #endif /* CONFIG_X86_LOCAL_APIC */ | 867 | #endif /* CONFIG_X86_LOCAL_APIC */ |
868 | 868 | ||
869 | #ifdef CONFIG_KEXEC | 869 | #ifdef CONFIG_KEXEC |
870 | /* crashkernel=size@addr specifies the location to reserve for | 870 | /* crashkernel=size@addr specifies the location to reserve for |
871 | * a crash kernel. By reserving this memory we guarantee | 871 | * a crash kernel. By reserving this memory we guarantee |
872 | * that linux never sets it up as a DMA target. | 872 | * that linux never sets it up as a DMA target. |
873 | * Useful for holding code to do something appropriate | 873 | * Useful for holding code to do something appropriate |
874 | * after a kernel panic. | 874 | * after a kernel panic. |
875 | */ | 875 | */ |
876 | else if (!memcmp(from, "crashkernel=", 12)) { | 876 | else if (!memcmp(from, "crashkernel=", 12)) { |
877 | unsigned long size, base; | 877 | unsigned long size, base; |
878 | size = memparse(from+12, &from); | 878 | size = memparse(from+12, &from); |
879 | if (*from == '@') { | 879 | if (*from == '@') { |
880 | base = memparse(from+1, &from); | 880 | base = memparse(from+1, &from); |
881 | /* FIXME: Do I want a sanity check | 881 | /* FIXME: Do I want a sanity check |
882 | * to validate the memory range? | 882 | * to validate the memory range? |
883 | */ | 883 | */ |
884 | crashk_res.start = base; | 884 | crashk_res.start = base; |
885 | crashk_res.end = base + size - 1; | 885 | crashk_res.end = base + size - 1; |
886 | } | 886 | } |
887 | } | 887 | } |
888 | #endif | 888 | #endif |
889 | #ifdef CONFIG_CRASH_DUMP | 889 | #ifdef CONFIG_CRASH_DUMP |
890 | /* elfcorehdr= specifies the location of elf core header | 890 | /* elfcorehdr= specifies the location of elf core header |
891 | * stored by the crashed kernel. | 891 | * stored by the crashed kernel. |
892 | */ | 892 | */ |
893 | else if (!memcmp(from, "elfcorehdr=", 11)) | 893 | else if (!memcmp(from, "elfcorehdr=", 11)) |
894 | elfcorehdr_addr = memparse(from+11, &from); | 894 | elfcorehdr_addr = memparse(from+11, &from); |
895 | #endif | 895 | #endif |
896 | 896 | ||
897 | /* | 897 | /* |
898 | * highmem=size forces highmem to be exactly 'size' bytes. | 898 | * highmem=size forces highmem to be exactly 'size' bytes. |
899 | * This works even on boxes that have no highmem otherwise. | 899 | * This works even on boxes that have no highmem otherwise. |
900 | * This also works to reduce highmem size on bigger boxes. | 900 | * This also works to reduce highmem size on bigger boxes. |
901 | */ | 901 | */ |
902 | else if (!memcmp(from, "highmem=", 8)) | 902 | else if (!memcmp(from, "highmem=", 8)) |
903 | highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT; | 903 | highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT; |
904 | 904 | ||
905 | /* | 905 | /* |
906 | * vmalloc=size forces the vmalloc area to be exactly 'size' | 906 | * vmalloc=size forces the vmalloc area to be exactly 'size' |
907 | * bytes. This can be used to increase (or decrease) the | 907 | * bytes. This can be used to increase (or decrease) the |
908 | * vmalloc area - the default is 128m. | 908 | * vmalloc area - the default is 128m. |
909 | */ | 909 | */ |
910 | else if (!memcmp(from, "vmalloc=", 8)) | 910 | else if (!memcmp(from, "vmalloc=", 8)) |
911 | __VMALLOC_RESERVE = memparse(from+8, &from); | 911 | __VMALLOC_RESERVE = memparse(from+8, &from); |
912 | 912 | ||
913 | next_char: | 913 | next_char: |
914 | c = *(from++); | 914 | c = *(from++); |
915 | if (!c) | 915 | if (!c) |
916 | break; | 916 | break; |
917 | if (COMMAND_LINE_SIZE <= ++len) | 917 | if (COMMAND_LINE_SIZE <= ++len) |
918 | break; | 918 | break; |
919 | *(to++) = c; | 919 | *(to++) = c; |
920 | } | 920 | } |
921 | *to = '\0'; | 921 | *to = '\0'; |
922 | *cmdline_p = command_line; | 922 | *cmdline_p = command_line; |
923 | if (userdef) { | 923 | if (userdef) { |
924 | printk(KERN_INFO "user-defined physical RAM map:\n"); | 924 | printk(KERN_INFO "user-defined physical RAM map:\n"); |
925 | print_memory_map("user"); | 925 | print_memory_map("user"); |
926 | } | 926 | } |
927 | } | 927 | } |
928 | 928 | ||
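"mem=512M" and "memmap=64M@16M" lean on memparse(), which reads a number, scales it by an optional K/M/G suffix and leaves the cursor just past what it consumed so the caller can check for '@', '#' or '$'. A rough standalone equivalent for illustration only (parse_size() below is not the kernel's memparse()):

#include <stdio.h>
#include <stdlib.h>

/* parse "<number>[KkMmGg]" and advance *retptr past what was consumed */
static unsigned long long parse_size(const char *s, const char **retptr)
{
	char *end;
	unsigned long long val = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': val <<= 10;	/* fall through */
	case 'M': case 'm': val <<= 10;	/* fall through */
	case 'K': case 'k': val <<= 10; end++;
	}
	*retptr = end;
	return val;
}

int main(void)
{
	const char *arg = "64M@16M", *p;
	unsigned long long size = parse_size(arg, &p);

	if (*p == '@') {
		unsigned long long start = parse_size(p + 1, &p);
		printf("region: %lluM at %lluM\n", size >> 20, start >> 20);
	}
	return 0;
}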
929 | /* | 929 | /* |
930 | * Callback for efi_memory_walk. | 930 | * Callback for efi_memory_walk. |
931 | */ | 931 | */ |
932 | static int __init | 932 | static int __init |
933 | efi_find_max_pfn(unsigned long start, unsigned long end, void *arg) | 933 | efi_find_max_pfn(unsigned long start, unsigned long end, void *arg) |
934 | { | 934 | { |
935 | unsigned long *max_pfn = arg, pfn; | 935 | unsigned long *max_pfn = arg, pfn; |
936 | 936 | ||
937 | if (start < end) { | 937 | if (start < end) { |
938 | pfn = PFN_UP(end -1); | 938 | pfn = PFN_UP(end -1); |
939 | if (pfn > *max_pfn) | 939 | if (pfn > *max_pfn) |
940 | *max_pfn = pfn; | 940 | *max_pfn = pfn; |
941 | } | 941 | } |
942 | return 0; | 942 | return 0; |
943 | } | 943 | } |
944 | 944 | ||
945 | 945 | ||
946 | /* | 946 | /* |
947 | * Find the highest page frame number we have available | 947 | * Find the highest page frame number we have available |
948 | */ | 948 | */ |
949 | void __init find_max_pfn(void) | 949 | void __init find_max_pfn(void) |
950 | { | 950 | { |
951 | int i; | 951 | int i; |
952 | 952 | ||
953 | max_pfn = 0; | 953 | max_pfn = 0; |
954 | if (efi_enabled) { | 954 | if (efi_enabled) { |
955 | efi_memmap_walk(efi_find_max_pfn, &max_pfn); | 955 | efi_memmap_walk(efi_find_max_pfn, &max_pfn); |
956 | return; | 956 | return; |
957 | } | 957 | } |
958 | 958 | ||
959 | for (i = 0; i < e820.nr_map; i++) { | 959 | for (i = 0; i < e820.nr_map; i++) { |
960 | unsigned long start, end; | 960 | unsigned long start, end; |
961 | /* RAM? */ | 961 | /* RAM? */ |
962 | if (e820.map[i].type != E820_RAM) | 962 | if (e820.map[i].type != E820_RAM) |
963 | continue; | 963 | continue; |
964 | start = PFN_UP(e820.map[i].addr); | 964 | start = PFN_UP(e820.map[i].addr); |
965 | end = PFN_DOWN(e820.map[i].addr + e820.map[i].size); | 965 | end = PFN_DOWN(e820.map[i].addr + e820.map[i].size); |
966 | if (start >= end) | 966 | if (start >= end) |
967 | continue; | 967 | continue; |
968 | if (end > max_pfn) | 968 | if (end > max_pfn) |
969 | max_pfn = end; | 969 | max_pfn = end; |
970 | } | 970 | } |
971 | } | 971 | } |
972 | 972 | ||
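find_max_pfn() converts byte addresses to page frame numbers, rounding each RAM range's start up and its end down so that partial pages at the edges are not counted. A sketch of that rounding with a made-up memory map, assuming the usual 4k page size:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

struct range { unsigned long addr, size; };

int main(void)
{
	struct range ram[] = {
		{ 0x00000000UL, 0x0009f000UL },		/* low RAM            */
		{ 0x00100000UL, 0x1fe00000UL },		/* 1M .. ~511M of RAM */
	};
	unsigned long max_pfn = 0;
	unsigned int i;

	for (i = 0; i < sizeof(ram) / sizeof(ram[0]); i++) {
		unsigned long start = PFN_UP(ram[i].addr);	/* round start up  */
		unsigned long end = PFN_DOWN(ram[i].addr + ram[i].size); /* end down */

		if (start < end && end > max_pfn)
			max_pfn = end;
	}
	printf("max_pfn = %lu (%luMB)\n", max_pfn, max_pfn >> (20 - PAGE_SHIFT));
	return 0;
}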
973 | /* | 973 | /* |
974 | * Determine low and high memory ranges: | 974 | * Determine low and high memory ranges: |
975 | */ | 975 | */ |
976 | unsigned long __init find_max_low_pfn(void) | 976 | unsigned long __init find_max_low_pfn(void) |
977 | { | 977 | { |
978 | unsigned long max_low_pfn; | 978 | unsigned long max_low_pfn; |
979 | 979 | ||
980 | max_low_pfn = max_pfn; | 980 | max_low_pfn = max_pfn; |
981 | if (max_low_pfn > MAXMEM_PFN) { | 981 | if (max_low_pfn > MAXMEM_PFN) { |
982 | if (highmem_pages == -1) | 982 | if (highmem_pages == -1) |
983 | highmem_pages = max_pfn - MAXMEM_PFN; | 983 | highmem_pages = max_pfn - MAXMEM_PFN; |
984 | if (highmem_pages + MAXMEM_PFN < max_pfn) | 984 | if (highmem_pages + MAXMEM_PFN < max_pfn) |
985 | max_pfn = MAXMEM_PFN + highmem_pages; | 985 | max_pfn = MAXMEM_PFN + highmem_pages; |
986 | if (highmem_pages + MAXMEM_PFN > max_pfn) { | 986 | if (highmem_pages + MAXMEM_PFN > max_pfn) { |
987 | printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages)); | 987 | printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages)); |
988 | highmem_pages = 0; | 988 | highmem_pages = 0; |
989 | } | 989 | } |
990 | max_low_pfn = MAXMEM_PFN; | 990 | max_low_pfn = MAXMEM_PFN; |
991 | #ifndef CONFIG_HIGHMEM | 991 | #ifndef CONFIG_HIGHMEM |
992 | /* Maximum memory usable is what is directly addressable */ | 992 | /* Maximum memory usable is what is directly addressable */ |
993 | printk(KERN_WARNING "Warning only %ldMB will be used.\n", | 993 | printk(KERN_WARNING "Warning only %ldMB will be used.\n", |
994 | MAXMEM>>20); | 994 | MAXMEM>>20); |
995 | if (max_pfn > MAX_NONPAE_PFN) | 995 | if (max_pfn > MAX_NONPAE_PFN) |
996 | printk(KERN_WARNING "Use a PAE enabled kernel.\n"); | 996 | printk(KERN_WARNING "Use a PAE enabled kernel.\n"); |
997 | else | 997 | else |
998 | printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); | 998 | printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); |
999 | max_pfn = MAXMEM_PFN; | 999 | max_pfn = MAXMEM_PFN; |
1000 | #else /* !CONFIG_HIGHMEM */ | 1000 | #else /* !CONFIG_HIGHMEM */ |
1001 | #ifndef CONFIG_X86_PAE | 1001 | #ifndef CONFIG_X86_PAE |
1002 | if (max_pfn > MAX_NONPAE_PFN) { | 1002 | if (max_pfn > MAX_NONPAE_PFN) { |
1003 | max_pfn = MAX_NONPAE_PFN; | 1003 | max_pfn = MAX_NONPAE_PFN; |
1004 | printk(KERN_WARNING "Warning only 4GB will be used.\n"); | 1004 | printk(KERN_WARNING "Warning only 4GB will be used.\n"); |
1005 | printk(KERN_WARNING "Use a PAE enabled kernel.\n"); | 1005 | printk(KERN_WARNING "Use a PAE enabled kernel.\n"); |
1006 | } | 1006 | } |
1007 | #endif /* !CONFIG_X86_PAE */ | 1007 | #endif /* !CONFIG_X86_PAE */ |
1008 | #endif /* !CONFIG_HIGHMEM */ | 1008 | #endif /* !CONFIG_HIGHMEM */ |
1009 | } else { | 1009 | } else { |
1010 | if (highmem_pages == -1) | 1010 | if (highmem_pages == -1) |
1011 | highmem_pages = 0; | 1011 | highmem_pages = 0; |
1012 | #ifdef CONFIG_HIGHMEM | 1012 | #ifdef CONFIG_HIGHMEM |
1013 | if (highmem_pages >= max_pfn) { | 1013 | if (highmem_pages >= max_pfn) { |
1014 | printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn)); | 1014 | printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn)); |
1015 | highmem_pages = 0; | 1015 | highmem_pages = 0; |
1016 | } | 1016 | } |
1017 | if (highmem_pages) { | 1017 | if (highmem_pages) { |
1018 | if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){ | 1018 | if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){ |
1019 | printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages)); | 1019 | printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages)); |
1020 | highmem_pages = 0; | 1020 | highmem_pages = 0; |
1021 | } | 1021 | } |
1022 | max_low_pfn -= highmem_pages; | 1022 | max_low_pfn -= highmem_pages; |
1023 | } | 1023 | } |
1024 | #else | 1024 | #else |
1025 | if (highmem_pages) | 1025 | if (highmem_pages) |
1026 | printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n"); | 1026 | printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n"); |
1027 | #endif | 1027 | #endif |
1028 | } | 1028 | } |
1029 | return max_low_pfn; | 1029 | return max_low_pfn; |
1030 | } | 1030 | } |
1031 | 1031 | ||
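find_max_low_pfn() caps directly mapped memory at MAXMEM_PFN (roughly 896MB with the default 3G/1G split) and pushes the remainder to highmem; "highmem=" merely overrides how many pages land on the high side. The default arithmetic, worked for an assumed 2GB machine — the 896MB figure and 4k page size are assumptions that depend on configuration:

#include <stdio.h>

#define PAGE_SHIFT	12
#define MAXMEM_PFN	(896UL << (20 - PAGE_SHIFT))	/* ~896MB in pages */

int main(void)
{
	unsigned long max_pfn = 2048UL << (20 - PAGE_SHIFT);	/* 2GB machine */
	unsigned long max_low_pfn = max_pfn;
	unsigned long highmem_pages;

	if (max_low_pfn > MAXMEM_PFN) {
		highmem_pages = max_pfn - MAXMEM_PFN;	/* default: all the rest */
		max_low_pfn = MAXMEM_PFN;
	} else {
		highmem_pages = 0;
	}
	printf("lowmem %luMB, highmem %luMB\n",
	       max_low_pfn >> (20 - PAGE_SHIFT),
	       highmem_pages >> (20 - PAGE_SHIFT));
	return 0;
}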
1032 | /* | 1032 | /* |
1033 | * Free all available memory for boot time allocation. Used | 1033 | * Free all available memory for boot time allocation. Used |
1034 | * as a callback function by efi_memory_walk() | 1034 | * as a callback function by efi_memory_walk() |
1035 | */ | 1035 | */ |
1036 | 1036 | ||
1037 | static int __init | 1037 | static int __init |
1038 | free_available_memory(unsigned long start, unsigned long end, void *arg) | 1038 | free_available_memory(unsigned long start, unsigned long end, void *arg) |
1039 | { | 1039 | { |
1040 | /* check max_low_pfn */ | 1040 | /* check max_low_pfn */ |
1041 | if (start >= ((max_low_pfn + 1) << PAGE_SHIFT)) | 1041 | if (start >= ((max_low_pfn + 1) << PAGE_SHIFT)) |
1042 | return 0; | 1042 | return 0; |
1043 | if (end >= ((max_low_pfn + 1) << PAGE_SHIFT)) | 1043 | if (end >= ((max_low_pfn + 1) << PAGE_SHIFT)) |
1044 | end = (max_low_pfn + 1) << PAGE_SHIFT; | 1044 | end = (max_low_pfn + 1) << PAGE_SHIFT; |
1045 | if (start < end) | 1045 | if (start < end) |
1046 | free_bootmem(start, end - start); | 1046 | free_bootmem(start, end - start); |
1047 | 1047 | ||
1048 | return 0; | 1048 | return 0; |
1049 | } | 1049 | } |
1050 | /* | 1050 | /* |
1051 | * Register fully available low RAM pages with the bootmem allocator. | 1051 | * Register fully available low RAM pages with the bootmem allocator. |
1052 | */ | 1052 | */ |
1053 | static void __init register_bootmem_low_pages(unsigned long max_low_pfn) | 1053 | static void __init register_bootmem_low_pages(unsigned long max_low_pfn) |
1054 | { | 1054 | { |
1055 | int i; | 1055 | int i; |
1056 | 1056 | ||
1057 | if (efi_enabled) { | 1057 | if (efi_enabled) { |
1058 | efi_memmap_walk(free_available_memory, NULL); | 1058 | efi_memmap_walk(free_available_memory, NULL); |
1059 | return; | 1059 | return; |
1060 | } | 1060 | } |
1061 | for (i = 0; i < e820.nr_map; i++) { | 1061 | for (i = 0; i < e820.nr_map; i++) { |
1062 | unsigned long curr_pfn, last_pfn, size; | 1062 | unsigned long curr_pfn, last_pfn, size; |
1063 | /* | 1063 | /* |
1064 | * Reserve usable low memory | 1064 | * Reserve usable low memory |
1065 | */ | 1065 | */ |
1066 | if (e820.map[i].type != E820_RAM) | 1066 | if (e820.map[i].type != E820_RAM) |
1067 | continue; | 1067 | continue; |
1068 | /* | 1068 | /* |
1069 | * We are rounding up the start address of usable memory: | 1069 | * We are rounding up the start address of usable memory: |
1070 | */ | 1070 | */ |
1071 | curr_pfn = PFN_UP(e820.map[i].addr); | 1071 | curr_pfn = PFN_UP(e820.map[i].addr); |
1072 | if (curr_pfn >= max_low_pfn) | 1072 | if (curr_pfn >= max_low_pfn) |
1073 | continue; | 1073 | continue; |
1074 | /* | 1074 | /* |
1075 | * ... and at the end of the usable range downwards: | 1075 | * ... and at the end of the usable range downwards: |
1076 | */ | 1076 | */ |
1077 | last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size); | 1077 | last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size); |
1078 | 1078 | ||
1079 | if (last_pfn > max_low_pfn) | 1079 | if (last_pfn > max_low_pfn) |
1080 | last_pfn = max_low_pfn; | 1080 | last_pfn = max_low_pfn; |
1081 | 1081 | ||
1082 | /* | 1082 | /* |
1083 | * .. finally, did all the rounding and playing | 1083 | * .. finally, did all the rounding and playing |
1084 | * around just make the area go away? | 1084 | * around just make the area go away? |
1085 | */ | 1085 | */ |
1086 | if (last_pfn <= curr_pfn) | 1086 | if (last_pfn <= curr_pfn) |
1087 | continue; | 1087 | continue; |
1088 | 1088 | ||
1089 | size = last_pfn - curr_pfn; | 1089 | size = last_pfn - curr_pfn; |
1090 | free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size)); | 1090 | free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size)); |
1091 | } | 1091 | } |
1092 | } | 1092 | } |
1093 | 1093 | ||
1094 | /* | 1094 | /* |
1095 | * workaround for Dell systems that neglect to reserve EBDA | 1095 | * workaround for Dell systems that neglect to reserve EBDA |
1096 | */ | 1096 | */ |
1097 | static void __init reserve_ebda_region(void) | 1097 | static void __init reserve_ebda_region(void) |
1098 | { | 1098 | { |
1099 | unsigned int addr; | 1099 | unsigned int addr; |
1100 | addr = get_bios_ebda(); | 1100 | addr = get_bios_ebda(); |
1101 | if (addr) | 1101 | if (addr) |
1102 | reserve_bootmem(addr, PAGE_SIZE); | 1102 | reserve_bootmem(addr, PAGE_SIZE); |
1103 | } | 1103 | } |
1104 | 1104 | ||
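reserve_ebda_region() keeps the bootmem allocator away from the Extended BIOS Data Area; get_bios_ebda() locates it from the real-mode segment the BIOS stores in the 16-bit word at 0x40E. A sketch of that segment-to-address conversion using a typical sample value — the kernel reads the live BIOS data area rather than a hard-coded segment:

#include <stdio.h>

static unsigned int ebda_addr_from_segment(unsigned short segment)
{
	return (unsigned int)segment << 4;	/* real-mode segment -> physical */
}

int main(void)
{
	unsigned short seg_at_0x40e = 0x9fc0;	/* typical value: 1k below 640k */

	printf("EBDA at %#x\n", ebda_addr_from_segment(seg_at_0x40e));
	return 0;
}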
1105 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 1105 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
1106 | void __init setup_bootmem_allocator(void); | 1106 | void __init setup_bootmem_allocator(void); |
1107 | static unsigned long __init setup_memory(void) | 1107 | static unsigned long __init setup_memory(void) |
1108 | { | 1108 | { |
1109 | /* | 1109 | /* |
1110 | * partially used pages are not usable - thus | 1110 | * partially used pages are not usable - thus |
1111 | * we are rounding upwards: | 1111 | * we are rounding upwards: |
1112 | */ | 1112 | */ |
1113 | min_low_pfn = PFN_UP(init_pg_tables_end); | 1113 | min_low_pfn = PFN_UP(init_pg_tables_end); |
1114 | 1114 | ||
1115 | find_max_pfn(); | 1115 | find_max_pfn(); |
1116 | 1116 | ||
1117 | max_low_pfn = find_max_low_pfn(); | 1117 | max_low_pfn = find_max_low_pfn(); |
1118 | 1118 | ||
1119 | #ifdef CONFIG_HIGHMEM | 1119 | #ifdef CONFIG_HIGHMEM |
1120 | highstart_pfn = highend_pfn = max_pfn; | 1120 | highstart_pfn = highend_pfn = max_pfn; |
1121 | if (max_pfn > max_low_pfn) { | 1121 | if (max_pfn > max_low_pfn) { |
1122 | highstart_pfn = max_low_pfn; | 1122 | highstart_pfn = max_low_pfn; |
1123 | } | 1123 | } |
1124 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", | 1124 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", |
1125 | pages_to_mb(highend_pfn - highstart_pfn)); | 1125 | pages_to_mb(highend_pfn - highstart_pfn)); |
1126 | #endif | 1126 | #endif |
1127 | printk(KERN_NOTICE "%ldMB LOWMEM available.\n", | 1127 | printk(KERN_NOTICE "%ldMB LOWMEM available.\n", |
1128 | pages_to_mb(max_low_pfn)); | 1128 | pages_to_mb(max_low_pfn)); |
1129 | 1129 | ||
1130 | setup_bootmem_allocator(); | 1130 | setup_bootmem_allocator(); |
1131 | 1131 | ||
1132 | return max_low_pfn; | 1132 | return max_low_pfn; |
1133 | } | 1133 | } |
1134 | 1134 | ||
1135 | void __init zone_sizes_init(void) | 1135 | void __init zone_sizes_init(void) |
1136 | { | 1136 | { |
1137 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; | 1137 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; |
1138 | unsigned int max_dma, low; | 1138 | unsigned int max_dma, low; |
1139 | 1139 | ||
1140 | max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | 1140 | max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; |
1141 | low = max_low_pfn; | 1141 | low = max_low_pfn; |
1142 | 1142 | ||
1143 | if (low < max_dma) | 1143 | if (low < max_dma) |
1144 | zones_size[ZONE_DMA] = low; | 1144 | zones_size[ZONE_DMA] = low; |
1145 | else { | 1145 | else { |
1146 | zones_size[ZONE_DMA] = max_dma; | 1146 | zones_size[ZONE_DMA] = max_dma; |
1147 | zones_size[ZONE_NORMAL] = low - max_dma; | 1147 | zones_size[ZONE_NORMAL] = low - max_dma; |
1148 | #ifdef CONFIG_HIGHMEM | 1148 | #ifdef CONFIG_HIGHMEM |
1149 | zones_size[ZONE_HIGHMEM] = highend_pfn - low; | 1149 | zones_size[ZONE_HIGHMEM] = highend_pfn - low; |
1150 | #endif | 1150 | #endif |
1151 | } | 1151 | } |
1152 | free_area_init(zones_size); | 1152 | free_area_init(zones_size); |
1153 | } | 1153 | } |
1154 | #else | 1154 | #else |
1155 | extern unsigned long __init setup_memory(void); | 1155 | extern unsigned long __init setup_memory(void); |
1156 | extern void zone_sizes_init(void); | 1156 | extern void zone_sizes_init(void); |
1157 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ | 1157 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
1158 | 1158 | ||
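zone_sizes_init() carves low memory into ZONE_DMA (everything below the 16MB ISA DMA limit) and ZONE_NORMAL, leaving whatever sits above max_low_pfn to ZONE_HIGHMEM when highmem is configured. The same split, worked for an assumed 2GB box with 896MB of lowmem (the limits are assumptions for the example):

#include <stdio.h>

#define PAGE_SHIFT	12
#define MB_PAGES(mb)	((unsigned long)(mb) << (20 - PAGE_SHIFT))

int main(void)
{
	unsigned long max_dma = MB_PAGES(16);		/* ISA DMA limit */
	unsigned long low = MB_PAGES(896);		/* max_low_pfn   */
	unsigned long highend = MB_PAGES(2048);		/* highend_pfn   */
	unsigned long dma, normal, high;

	if (low < max_dma) {
		dma = low;				/* tiny box: all of it is DMA-able */
		normal = high = 0;
	} else {
		dma = max_dma;
		normal = low - max_dma;
		high = highend - low;
	}
	printf("ZONE_DMA %lu pages, ZONE_NORMAL %lu, ZONE_HIGHMEM %lu\n",
	       dma, normal, high);
	return 0;
}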
1159 | void __init setup_bootmem_allocator(void) | 1159 | void __init setup_bootmem_allocator(void) |
1160 | { | 1160 | { |
1161 | unsigned long bootmap_size; | 1161 | unsigned long bootmap_size; |
1162 | /* | 1162 | /* |
1163 | * Initialize the boot-time allocator (with low memory only): | 1163 | * Initialize the boot-time allocator (with low memory only): |
1164 | */ | 1164 | */ |
1165 | bootmap_size = init_bootmem(min_low_pfn, max_low_pfn); | 1165 | bootmap_size = init_bootmem(min_low_pfn, max_low_pfn); |
1166 | 1166 | ||
1167 | register_bootmem_low_pages(max_low_pfn); | 1167 | register_bootmem_low_pages(max_low_pfn); |
1168 | 1168 | ||
1169 | /* | 1169 | /* |
1170 | * Reserve the bootmem bitmap itself as well. We do this in two | 1170 | * Reserve the bootmem bitmap itself as well. We do this in two |
1171 | * steps (first step was init_bootmem()) because this catches | 1171 | * steps (first step was init_bootmem()) because this catches |
1172 | * the (very unlikely) case of us accidentally initializing the | 1172 | * the (very unlikely) case of us accidentally initializing the |
1173 | * bootmem allocator with an invalid RAM area. | 1173 | * bootmem allocator with an invalid RAM area. |
1174 | */ | 1174 | */ |
1175 | reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) + | 1175 | reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) + |
1176 | bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START)); | 1176 | bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START)); |
1177 | 1177 | ||
1178 | /* | 1178 | /* |
1179 | * reserve physical page 0 - it's a special BIOS page on many boxes, | 1179 | * reserve physical page 0 - it's a special BIOS page on many boxes, |
1180 | * enabling clean reboots, SMP operation, laptop functions. | 1180 | * enabling clean reboots, SMP operation, laptop functions. |
1181 | */ | 1181 | */ |
1182 | reserve_bootmem(0, PAGE_SIZE); | 1182 | reserve_bootmem(0, PAGE_SIZE); |
1183 | 1183 | ||
1184 | /* reserve EBDA region, it's a 4K region */ | 1184 | /* reserve EBDA region, it's a 4K region */ |
1185 | reserve_ebda_region(); | 1185 | reserve_ebda_region(); |
1186 | 1186 | ||
1187 | /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent | 1187 | /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent |
1188 | PCI prefetch into it (errata #56). Usually the page is reserved anyways, | 1188 | PCI prefetch into it (errata #56). Usually the page is reserved anyways, |
1189 | unless you have no PS/2 mouse plugged in. */ | 1189 | unless you have no PS/2 mouse plugged in. */ |
1190 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | 1190 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && |
1191 | boot_cpu_data.x86 == 6) | 1191 | boot_cpu_data.x86 == 6) |
1192 | reserve_bootmem(0xa0000 - 4096, 4096); | 1192 | reserve_bootmem(0xa0000 - 4096, 4096); |
1193 | 1193 | ||
1194 | #ifdef CONFIG_SMP | 1194 | #ifdef CONFIG_SMP |
1195 | /* | 1195 | /* |
1196 | * But first pinch a few for the stack/trampoline stuff | 1196 | * But first pinch a few for the stack/trampoline stuff |
1197 | * FIXME: Don't need the extra page at 4K, but need to fix | 1197 | * FIXME: Don't need the extra page at 4K, but need to fix |
1198 | * trampoline before removing it. (see the GDT stuff) | 1198 | * trampoline before removing it. (see the GDT stuff) |
1199 | */ | 1199 | */ |
1200 | reserve_bootmem(PAGE_SIZE, PAGE_SIZE); | 1200 | reserve_bootmem(PAGE_SIZE, PAGE_SIZE); |
1201 | #endif | 1201 | #endif |
1202 | #ifdef CONFIG_ACPI_SLEEP | 1202 | #ifdef CONFIG_ACPI_SLEEP |
1203 | /* | 1203 | /* |
1204 | * Reserve low memory region for sleep support. | 1204 | * Reserve low memory region for sleep support. |
1205 | */ | 1205 | */ |
1206 | acpi_reserve_bootmem(); | 1206 | acpi_reserve_bootmem(); |
1207 | #endif | 1207 | #endif |
1208 | #ifdef CONFIG_X86_FIND_SMP_CONFIG | 1208 | #ifdef CONFIG_X86_FIND_SMP_CONFIG |
1209 | /* | 1209 | /* |
1210 | * Find and reserve possible boot-time SMP configuration: | 1210 | * Find and reserve possible boot-time SMP configuration: |
1211 | */ | 1211 | */ |
1212 | find_smp_config(); | 1212 | find_smp_config(); |
1213 | #endif | 1213 | #endif |
1214 | 1214 | ||
1215 | #ifdef CONFIG_BLK_DEV_INITRD | 1215 | #ifdef CONFIG_BLK_DEV_INITRD |
1216 | if (LOADER_TYPE && INITRD_START) { | 1216 | if (LOADER_TYPE && INITRD_START) { |
1217 | if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { | 1217 | if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { |
1218 | reserve_bootmem(INITRD_START, INITRD_SIZE); | 1218 | reserve_bootmem(INITRD_START, INITRD_SIZE); |
1219 | initrd_start = | 1219 | initrd_start = |
1220 | INITRD_START ? INITRD_START + PAGE_OFFSET : 0; | 1220 | INITRD_START ? INITRD_START + PAGE_OFFSET : 0; |
1221 | initrd_end = initrd_start+INITRD_SIZE; | 1221 | initrd_end = initrd_start+INITRD_SIZE; |
1222 | } | 1222 | } |
1223 | else { | 1223 | else { |
1224 | printk(KERN_ERR "initrd extends beyond end of memory " | 1224 | printk(KERN_ERR "initrd extends beyond end of memory " |
1225 | "(0x%08lx > 0x%08lx)\ndisabling initrd\n", | 1225 | "(0x%08lx > 0x%08lx)\ndisabling initrd\n", |
1226 | INITRD_START + INITRD_SIZE, | 1226 | INITRD_START + INITRD_SIZE, |
1227 | max_low_pfn << PAGE_SHIFT); | 1227 | max_low_pfn << PAGE_SHIFT); |
1228 | initrd_start = 0; | 1228 | initrd_start = 0; |
1229 | } | 1229 | } |
1230 | } | 1230 | } |
1231 | #endif | 1231 | #endif |
1232 | #ifdef CONFIG_KEXEC | 1232 | #ifdef CONFIG_KEXEC |
1233 | if (crashk_res.start != crashk_res.end) | 1233 | if (crashk_res.start != crashk_res.end) |
1234 | reserve_bootmem(crashk_res.start, | 1234 | reserve_bootmem(crashk_res.start, |
1235 | crashk_res.end - crashk_res.start + 1); | 1235 | crashk_res.end - crashk_res.start + 1); |
1236 | #endif | 1236 | #endif |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | /* | 1239 | /* |
1240 | * The node 0 pgdat is initialized before all of these because | 1240 | * The node 0 pgdat is initialized before all of these because |
1241 | * it's needed for bootmem. node>0 pgdats have their virtual | 1241 | * it's needed for bootmem. node>0 pgdats have their virtual |
1242 | * space allocated before the pagetables are in place to access | 1242 | * space allocated before the pagetables are in place to access |
1243 | * them, so they can't be cleared then. | 1243 | * them, so they can't be cleared then. |
1244 | * | 1244 | * |
1245 | * This should all compile down to nothing when NUMA is off. | 1245 | * This should all compile down to nothing when NUMA is off. |
1246 | */ | 1246 | */ |
1247 | void __init remapped_pgdat_init(void) | 1247 | void __init remapped_pgdat_init(void) |
1248 | { | 1248 | { |
1249 | int nid; | 1249 | int nid; |
1250 | 1250 | ||
1251 | for_each_online_node(nid) { | 1251 | for_each_online_node(nid) { |
1252 | if (nid != 0) | 1252 | if (nid != 0) |
1253 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); | 1253 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); |
1254 | } | 1254 | } |
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | /* | 1257 | /* |
1258 | * Request address space for all standard RAM and ROM resources | 1258 | * Request address space for all standard RAM and ROM resources |
1259 | * and also for regions reported as reserved by the e820. | 1259 | * and also for regions reported as reserved by the e820. |
1260 | */ | 1260 | */ |
1261 | static void __init | 1261 | static void __init |
1262 | legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource) | 1262 | legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource) |
1263 | { | 1263 | { |
1264 | int i; | 1264 | int i; |
1265 | 1265 | ||
1266 | probe_roms(); | 1266 | probe_roms(); |
1267 | for (i = 0; i < e820.nr_map; i++) { | 1267 | for (i = 0; i < e820.nr_map; i++) { |
1268 | struct resource *res; | 1268 | struct resource *res; |
1269 | if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL) | 1269 | if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL) |
1270 | continue; | 1270 | continue; |
1271 | res = alloc_bootmem_low(sizeof(struct resource)); | 1271 | res = alloc_bootmem_low(sizeof(struct resource)); |
1272 | switch (e820.map[i].type) { | 1272 | switch (e820.map[i].type) { |
1273 | case E820_RAM: res->name = "System RAM"; break; | 1273 | case E820_RAM: res->name = "System RAM"; break; |
1274 | case E820_ACPI: res->name = "ACPI Tables"; break; | 1274 | case E820_ACPI: res->name = "ACPI Tables"; break; |
1275 | case E820_NVS: res->name = "ACPI Non-volatile Storage"; break; | 1275 | case E820_NVS: res->name = "ACPI Non-volatile Storage"; break; |
1276 | default: res->name = "reserved"; | 1276 | default: res->name = "reserved"; |
1277 | } | 1277 | } |
1278 | res->start = e820.map[i].addr; | 1278 | res->start = e820.map[i].addr; |
1279 | res->end = res->start + e820.map[i].size - 1; | 1279 | res->end = res->start + e820.map[i].size - 1; |
1280 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 1280 | res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
1281 | request_resource(&iomem_resource, res); | 1281 | request_resource(&iomem_resource, res); |
1282 | if (e820.map[i].type == E820_RAM) { | 1282 | if (e820.map[i].type == E820_RAM) { |
1283 | /* | 1283 | /* |
1284 | * We don't know which RAM region contains kernel data, | 1284 | * We don't know which RAM region contains kernel data, |
1285 | * so we try it repeatedly and let the resource manager | 1285 | * so we try it repeatedly and let the resource manager |
1286 | * test it. | 1286 | * test it. |
1287 | */ | 1287 | */ |
1288 | request_resource(res, code_resource); | 1288 | request_resource(res, code_resource); |
1289 | request_resource(res, data_resource); | 1289 | request_resource(res, data_resource); |
1290 | #ifdef CONFIG_KEXEC | 1290 | #ifdef CONFIG_KEXEC |
1291 | request_resource(res, &crashk_res); | 1291 | request_resource(res, &crashk_res); |
1292 | #endif | 1292 | #endif |
1293 | } | 1293 | } |
1294 | } | 1294 | } |
1295 | } | 1295 | } |
1296 | 1296 | ||
1297 | /* | 1297 | /* |
1298 | * Request address space for all standard resources | 1298 | * Request address space for all standard resources |
1299 | */ | 1299 | */ |
1300 | static void __init register_memory(void) | 1300 | static void __init register_memory(void) |
1301 | { | 1301 | { |
1302 | unsigned long gapstart, gapsize; | 1302 | unsigned long gapstart, gapsize; |
1303 | unsigned long long last; | 1303 | unsigned long long last; |
1304 | int i; | 1304 | int i; |
1305 | 1305 | ||
1306 | if (efi_enabled) | 1306 | if (efi_enabled) |
1307 | efi_initialize_iomem_resources(&code_resource, &data_resource); | 1307 | efi_initialize_iomem_resources(&code_resource, &data_resource); |
1308 | else | 1308 | else |
1309 | legacy_init_iomem_resources(&code_resource, &data_resource); | 1309 | legacy_init_iomem_resources(&code_resource, &data_resource); |
1310 | 1310 | ||
1311 | /* EFI systems may still have VGA */ | 1311 | /* EFI systems may still have VGA */ |
1312 | request_resource(&iomem_resource, &video_ram_resource); | 1312 | request_resource(&iomem_resource, &video_ram_resource); |
1313 | 1313 | ||
1314 | /* request I/O space for devices used on all i[345]86 PCs */ | 1314 | /* request I/O space for devices used on all i[345]86 PCs */ |
1315 | for (i = 0; i < STANDARD_IO_RESOURCES; i++) | 1315 | for (i = 0; i < STANDARD_IO_RESOURCES; i++) |
1316 | request_resource(&ioport_resource, &standard_io_resources[i]); | 1316 | request_resource(&ioport_resource, &standard_io_resources[i]); |
1317 | 1317 | ||
1318 | /* | 1318 | /* |
1319 | * Search for the biggest gap in the low 32 bits of the e820 | 1319 | * Search for the biggest gap in the low 32 bits of the e820 |
1320 | * memory space. | 1320 | * memory space. |
1321 | */ | 1321 | */ |
1322 | last = 0x100000000ull; | 1322 | last = 0x100000000ull; |
1323 | gapstart = 0x10000000; | 1323 | gapstart = 0x10000000; |
1324 | gapsize = 0x400000; | 1324 | gapsize = 0x400000; |
1325 | i = e820.nr_map; | 1325 | i = e820.nr_map; |
1326 | while (--i >= 0) { | 1326 | while (--i >= 0) { |
1327 | unsigned long long start = e820.map[i].addr; | 1327 | unsigned long long start = e820.map[i].addr; |
1328 | unsigned long long end = start + e820.map[i].size; | 1328 | unsigned long long end = start + e820.map[i].size; |
1329 | 1329 | ||
1330 | /* | 1330 | /* |
1331 | * Since "last" is at most 4GB, we know we'll | 1331 | * Since "last" is at most 4GB, we know we'll |
1332 | * fit in 32 bits if this condition is true | 1332 | * fit in 32 bits if this condition is true |
1333 | */ | 1333 | */ |
1334 | if (last > end) { | 1334 | if (last > end) { |
1335 | unsigned long gap = last - end; | 1335 | unsigned long gap = last - end; |
1336 | 1336 | ||
1337 | if (gap > gapsize) { | 1337 | if (gap > gapsize) { |
1338 | gapsize = gap; | 1338 | gapsize = gap; |
1339 | gapstart = end; | 1339 | gapstart = end; |
1340 | } | 1340 | } |
1341 | } | 1341 | } |
1342 | if (start < last) | 1342 | if (start < last) |
1343 | last = start; | 1343 | last = start; |
1344 | } | 1344 | } |
1345 | 1345 | ||
1346 | /* | 1346 | /* |
1347 | * Start allocating dynamic PCI memory a bit into the gap, | 1347 | * Start allocating dynamic PCI memory a bit into the gap, |
1348 | * aligned up to the nearest megabyte. | 1348 | * aligned up to the nearest megabyte. |
1349 | * | 1349 | * |
1350 | * Question: should we try to pad it up a bit (do something | 1350 | * Question: should we try to pad it up a bit (do something |
1351 | * like " + (gapsize >> 3)" in there too?). We now have the | 1351 | * like " + (gapsize >> 3)" in there too?). We now have the |
1352 | * technology. | 1352 | * technology. |
1353 | */ | 1353 | */ |
1354 | pci_mem_start = (gapstart + 0xfffff) & ~0xfffff; | 1354 | pci_mem_start = (gapstart + 0xfffff) & ~0xfffff; |
1355 | 1355 | ||
1356 | printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n", | 1356 | printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n", |
1357 | pci_mem_start, gapstart, gapsize); | 1357 | pci_mem_start, gapstart, gapsize); |
1358 | } | 1358 | } |
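
The gap search in register_memory() above relies on the e820 map already being sanitized into ascending, non-overlapping entries: it walks the map from the top down, keeps the lowest start seen so far in "last", and records the largest hole below 4GB, which then becomes the window for dynamic PCI allocations. A minimal user-space sketch of the same scan, with a made-up three-entry map standing in for e820:

#include <stdio.h>

struct range { unsigned long long addr, size; };

/* Hypothetical, already-sorted memory map standing in for e820. */
static const struct range map[] = {
        { 0x00000000ULL, 0x0009f000ULL },       /* low RAM */
        { 0x00100000ULL, 0x3fef0000ULL },       /* main RAM */
        { 0xfec00000ULL, 0x00140000ULL },       /* chipset region */
};

int main(void)
{
        unsigned long long last = 0x100000000ULL;       /* start the scan at 4GB */
        unsigned long gapstart = 0x10000000, gapsize = 0x400000;
        int i = sizeof(map) / sizeof(map[0]);

        while (--i >= 0) {
                unsigned long long start = map[i].addr;
                unsigned long long end = start + map[i].size;

                if (last > end && last - end > gapsize) {       /* bigger hole found */
                        gapsize = last - end;
                        gapstart = end;
                }
                if (start < last)
                        last = start;
        }
        /* Align up to 1MB, as setup.c does for pci_mem_start. */
        printf("PCI resources from %08lx (gap: %08lx:%08lx)\n",
               (gapstart + 0xfffffUL) & ~0xfffffUL, gapstart, gapsize);
        return 0;
}
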
1359 | 1359 | ||
1360 | /* Use inline assembly to define this because the nops are defined | 1360 | /* Use inline assembly to define this because the nops are defined |
1361 | as inline assembly strings in the include files and we cannot | 1361 | as inline assembly strings in the include files and we cannot |
1362 | get them easily into strings. */ | 1362 | get them easily into strings. */ |
1363 | asm("\t.data\nintelnops: " | 1363 | asm("\t.data\nintelnops: " |
1364 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 | 1364 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 |
1365 | GENERIC_NOP7 GENERIC_NOP8); | 1365 | GENERIC_NOP7 GENERIC_NOP8); |
1366 | asm("\t.data\nk8nops: " | 1366 | asm("\t.data\nk8nops: " |
1367 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 | 1367 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 |
1368 | K8_NOP7 K8_NOP8); | 1368 | K8_NOP7 K8_NOP8); |
1369 | asm("\t.data\nk7nops: " | 1369 | asm("\t.data\nk7nops: " |
1370 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 | 1370 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 |
1371 | K7_NOP7 K7_NOP8); | 1371 | K7_NOP7 K7_NOP8); |
1372 | 1372 | ||
1373 | extern unsigned char intelnops[], k8nops[], k7nops[]; | 1373 | extern unsigned char intelnops[], k8nops[], k7nops[]; |
1374 | static unsigned char *intel_nops[ASM_NOP_MAX+1] = { | 1374 | static unsigned char *intel_nops[ASM_NOP_MAX+1] = { |
1375 | NULL, | 1375 | NULL, |
1376 | intelnops, | 1376 | intelnops, |
1377 | intelnops + 1, | 1377 | intelnops + 1, |
1378 | intelnops + 1 + 2, | 1378 | intelnops + 1 + 2, |
1379 | intelnops + 1 + 2 + 3, | 1379 | intelnops + 1 + 2 + 3, |
1380 | intelnops + 1 + 2 + 3 + 4, | 1380 | intelnops + 1 + 2 + 3 + 4, |
1381 | intelnops + 1 + 2 + 3 + 4 + 5, | 1381 | intelnops + 1 + 2 + 3 + 4 + 5, |
1382 | intelnops + 1 + 2 + 3 + 4 + 5 + 6, | 1382 | intelnops + 1 + 2 + 3 + 4 + 5 + 6, |
1383 | intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | 1383 | intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, |
1384 | }; | 1384 | }; |
1385 | static unsigned char *k8_nops[ASM_NOP_MAX+1] = { | 1385 | static unsigned char *k8_nops[ASM_NOP_MAX+1] = { |
1386 | NULL, | 1386 | NULL, |
1387 | k8nops, | 1387 | k8nops, |
1388 | k8nops + 1, | 1388 | k8nops + 1, |
1389 | k8nops + 1 + 2, | 1389 | k8nops + 1 + 2, |
1390 | k8nops + 1 + 2 + 3, | 1390 | k8nops + 1 + 2 + 3, |
1391 | k8nops + 1 + 2 + 3 + 4, | 1391 | k8nops + 1 + 2 + 3 + 4, |
1392 | k8nops + 1 + 2 + 3 + 4 + 5, | 1392 | k8nops + 1 + 2 + 3 + 4 + 5, |
1393 | k8nops + 1 + 2 + 3 + 4 + 5 + 6, | 1393 | k8nops + 1 + 2 + 3 + 4 + 5 + 6, |
1394 | k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | 1394 | k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, |
1395 | }; | 1395 | }; |
1396 | static unsigned char *k7_nops[ASM_NOP_MAX+1] = { | 1396 | static unsigned char *k7_nops[ASM_NOP_MAX+1] = { |
1397 | NULL, | 1397 | NULL, |
1398 | k7nops, | 1398 | k7nops, |
1399 | k7nops + 1, | 1399 | k7nops + 1, |
1400 | k7nops + 1 + 2, | 1400 | k7nops + 1 + 2, |
1401 | k7nops + 1 + 2 + 3, | 1401 | k7nops + 1 + 2 + 3, |
1402 | k7nops + 1 + 2 + 3 + 4, | 1402 | k7nops + 1 + 2 + 3 + 4, |
1403 | k7nops + 1 + 2 + 3 + 4 + 5, | 1403 | k7nops + 1 + 2 + 3 + 4 + 5, |
1404 | k7nops + 1 + 2 + 3 + 4 + 5 + 6, | 1404 | k7nops + 1 + 2 + 3 + 4 + 5 + 6, |
1405 | k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, | 1405 | k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, |
1406 | }; | 1406 | }; |
1407 | static struct nop { | 1407 | static struct nop { |
1408 | int cpuid; | 1408 | int cpuid; |
1409 | unsigned char **noptable; | 1409 | unsigned char **noptable; |
1410 | } noptypes[] = { | 1410 | } noptypes[] = { |
1411 | { X86_FEATURE_K8, k8_nops }, | 1411 | { X86_FEATURE_K8, k8_nops }, |
1412 | { X86_FEATURE_K7, k7_nops }, | 1412 | { X86_FEATURE_K7, k7_nops }, |
1413 | { -1, NULL } | 1413 | { -1, NULL } |
1414 | }; | 1414 | }; |
1415 | 1415 | ||
1416 | /* Replace instructions with better alternatives for this CPU type. | 1416 | /* Replace instructions with better alternatives for this CPU type. |
1417 | 1417 | ||
1418 | This runs before SMP is initialized to avoid SMP problems with | 1418 | This runs before SMP is initialized to avoid SMP problems with |
1419 | self-modifying code. This implies that asymmetric systems where | 1419 | self-modifying code. This implies that asymmetric systems where |
1420 | APs have fewer capabilities than the boot processor are not handled. | 1420 | APs have fewer capabilities than the boot processor are not handled. |
1421 | Tough. Make sure you disable such features by hand. */ | 1421 | Tough. Make sure you disable such features by hand. */ |
1422 | void apply_alternatives(void *start, void *end) | 1422 | void apply_alternatives(void *start, void *end) |
1423 | { | 1423 | { |
1424 | struct alt_instr *a; | 1424 | struct alt_instr *a; |
1425 | int diff, i, k; | 1425 | int diff, i, k; |
1426 | unsigned char **noptable = intel_nops; | 1426 | unsigned char **noptable = intel_nops; |
1427 | for (i = 0; noptypes[i].cpuid >= 0; i++) { | 1427 | for (i = 0; noptypes[i].cpuid >= 0; i++) { |
1428 | if (boot_cpu_has(noptypes[i].cpuid)) { | 1428 | if (boot_cpu_has(noptypes[i].cpuid)) { |
1429 | noptable = noptypes[i].noptable; | 1429 | noptable = noptypes[i].noptable; |
1430 | break; | 1430 | break; |
1431 | } | 1431 | } |
1432 | } | 1432 | } |
1433 | for (a = start; (void *)a < end; a++) { | 1433 | for (a = start; (void *)a < end; a++) { |
1434 | if (!boot_cpu_has(a->cpuid)) | 1434 | if (!boot_cpu_has(a->cpuid)) |
1435 | continue; | 1435 | continue; |
1436 | BUG_ON(a->replacementlen > a->instrlen); | 1436 | BUG_ON(a->replacementlen > a->instrlen); |
1437 | memcpy(a->instr, a->replacement, a->replacementlen); | 1437 | memcpy(a->instr, a->replacement, a->replacementlen); |
1438 | diff = a->instrlen - a->replacementlen; | 1438 | diff = a->instrlen - a->replacementlen; |
1439 | /* Pad the rest with nops */ | 1439 | /* Pad the rest with nops */ |
1440 | for (i = a->replacementlen; diff > 0; diff -= k, i += k) { | 1440 | for (i = a->replacementlen; diff > 0; diff -= k, i += k) { |
1441 | k = diff; | 1441 | k = diff; |
1442 | if (k > ASM_NOP_MAX) | 1442 | if (k > ASM_NOP_MAX) |
1443 | k = ASM_NOP_MAX; | 1443 | k = ASM_NOP_MAX; |
1444 | memcpy(a->instr + i, noptable[k], k); | 1444 | memcpy(a->instr + i, noptable[k], k); |
1445 | } | 1445 | } |
1446 | } | 1446 | } |
1447 | } | 1447 | } |
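
The padding loop at the end of apply_alternatives() fills whatever the replacement did not cover with the longest NOP available, at most ASM_NOP_MAX (8) bytes per copy. A stand-alone sketch of just that loop, padding with runs of single-byte 0x90 NOPs instead of the vendor-specific sequences the kernel picks out of noptypes[]:

#include <stdio.h>
#include <string.h>

#define ASM_NOP_MAX 8

/* Simplified table: entry k is k single-byte NOPs; the kernel uses
 * multi-byte Intel/K7/K8 encodings instead. */
static unsigned char nops[ASM_NOP_MAX + 1][ASM_NOP_MAX];

static void pad_with_nops(unsigned char *instr, int instrlen, int replacementlen)
{
        int diff = instrlen - replacementlen;
        int i, k;

        for (i = replacementlen; diff > 0; diff -= k, i += k) {
                k = diff > ASM_NOP_MAX ? ASM_NOP_MAX : diff;
                memcpy(instr + i, nops[k], k);
        }
}

int main(void)
{
        unsigned char buf[12];
        int k;

        for (k = 1; k <= ASM_NOP_MAX; k++)
                memset(nops[k], 0x90, k);

        memset(buf, 0xcc, sizeof(buf));         /* pretend original instruction bytes */
        memcpy(buf, "\x0f\xae\xf0", 3);         /* pretend 3-byte replacement */
        pad_with_nops(buf, sizeof(buf), 3);     /* bytes 3..11 become NOPs */

        for (k = 0; k < (int)sizeof(buf); k++)
                printf("%02x ", buf[k]);
        printf("\n");
        return 0;
}
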
1448 | 1448 | ||
1449 | void __init alternative_instructions(void) | 1449 | void __init alternative_instructions(void) |
1450 | { | 1450 | { |
1451 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | 1451 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; |
1452 | apply_alternatives(__alt_instructions, __alt_instructions_end); | 1452 | apply_alternatives(__alt_instructions, __alt_instructions_end); |
1453 | } | 1453 | } |
1454 | 1454 | ||
1455 | static char * __init machine_specific_memory_setup(void); | 1455 | static char * __init machine_specific_memory_setup(void); |
1456 | 1456 | ||
1457 | #ifdef CONFIG_MCA | 1457 | #ifdef CONFIG_MCA |
1458 | static void set_mca_bus(int x) | 1458 | static void set_mca_bus(int x) |
1459 | { | 1459 | { |
1460 | MCA_bus = x; | 1460 | MCA_bus = x; |
1461 | } | 1461 | } |
1462 | #else | 1462 | #else |
1463 | static void set_mca_bus(int x) { } | 1463 | static void set_mca_bus(int x) { } |
1464 | #endif | 1464 | #endif |
1465 | 1465 | ||
1466 | /* | 1466 | /* |
1467 | * Determine if we were loaded by an EFI loader. If so, then we have also been | 1467 | * Determine if we were loaded by an EFI loader. If so, then we have also been |
1468 | * passed the efi memmap, systab, etc., so we should use these data structures | 1468 | * passed the efi memmap, systab, etc., so we should use these data structures |
1469 | * for initialization. Note, the efi init code path is determined by the | 1469 | * for initialization. Note, the efi init code path is determined by the |
1470 | * global efi_enabled. This allows the same kernel image to be used on existing | 1470 | * global efi_enabled. This allows the same kernel image to be used on existing |
1471 | * systems (with a traditional BIOS) as well as on EFI systems. | 1471 | * systems (with a traditional BIOS) as well as on EFI systems. |
1472 | */ | 1472 | */ |
1473 | void __init setup_arch(char **cmdline_p) | 1473 | void __init setup_arch(char **cmdline_p) |
1474 | { | 1474 | { |
1475 | unsigned long max_low_pfn; | 1475 | unsigned long max_low_pfn; |
1476 | 1476 | ||
1477 | memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); | 1477 | memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); |
1478 | pre_setup_arch_hook(); | 1478 | pre_setup_arch_hook(); |
1479 | early_cpu_init(); | 1479 | early_cpu_init(); |
1480 | 1480 | ||
1481 | /* | 1481 | /* |
1482 | * FIXME: This isn't an official loader_type right | 1482 | * FIXME: This isn't an official loader_type right |
1483 | * now but does currently work with elilo. | 1483 | * now but does currently work with elilo. |
1484 | * If we were configured as an EFI kernel, check to make | 1484 | * If we were configured as an EFI kernel, check to make |
1485 | * sure that we were loaded correctly from elilo and that | 1485 | * sure that we were loaded correctly from elilo and that |
1486 | * the system table is valid. If not, then initialize normally. | 1486 | * the system table is valid. If not, then initialize normally. |
1487 | */ | 1487 | */ |
1488 | #ifdef CONFIG_EFI | 1488 | #ifdef CONFIG_EFI |
1489 | if ((LOADER_TYPE == 0x50) && EFI_SYSTAB) | 1489 | if ((LOADER_TYPE == 0x50) && EFI_SYSTAB) |
1490 | efi_enabled = 1; | 1490 | efi_enabled = 1; |
1491 | #endif | 1491 | #endif |
1492 | 1492 | ||
1493 | ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); | 1493 | ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); |
1494 | drive_info = DRIVE_INFO; | 1494 | drive_info = DRIVE_INFO; |
1495 | screen_info = SCREEN_INFO; | 1495 | screen_info = SCREEN_INFO; |
1496 | edid_info = EDID_INFO; | 1496 | edid_info = EDID_INFO; |
1497 | apm_info.bios = APM_BIOS_INFO; | 1497 | apm_info.bios = APM_BIOS_INFO; |
1498 | ist_info = IST_INFO; | 1498 | ist_info = IST_INFO; |
1499 | saved_videomode = VIDEO_MODE; | 1499 | saved_videomode = VIDEO_MODE; |
1500 | if( SYS_DESC_TABLE.length != 0 ) { | 1500 | if( SYS_DESC_TABLE.length != 0 ) { |
1501 | set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2); | 1501 | set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2); |
1502 | machine_id = SYS_DESC_TABLE.table[0]; | 1502 | machine_id = SYS_DESC_TABLE.table[0]; |
1503 | machine_submodel_id = SYS_DESC_TABLE.table[1]; | 1503 | machine_submodel_id = SYS_DESC_TABLE.table[1]; |
1504 | BIOS_revision = SYS_DESC_TABLE.table[2]; | 1504 | BIOS_revision = SYS_DESC_TABLE.table[2]; |
1505 | } | 1505 | } |
1506 | bootloader_type = LOADER_TYPE; | 1506 | bootloader_type = LOADER_TYPE; |
1507 | 1507 | ||
1508 | #ifdef CONFIG_BLK_DEV_RAM | 1508 | #ifdef CONFIG_BLK_DEV_RAM |
1509 | rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; | 1509 | rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; |
1510 | rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); | 1510 | rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); |
1511 | rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); | 1511 | rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); |
1512 | #endif | 1512 | #endif |
1513 | ARCH_SETUP | 1513 | ARCH_SETUP |
1514 | if (efi_enabled) | 1514 | if (efi_enabled) |
1515 | efi_init(); | 1515 | efi_init(); |
1516 | else { | 1516 | else { |
1517 | printk(KERN_INFO "BIOS-provided physical RAM map:\n"); | 1517 | printk(KERN_INFO "BIOS-provided physical RAM map:\n"); |
1518 | print_memory_map(machine_specific_memory_setup()); | 1518 | print_memory_map(machine_specific_memory_setup()); |
1519 | } | 1519 | } |
1520 | 1520 | ||
1521 | copy_edd(); | 1521 | copy_edd(); |
1522 | 1522 | ||
1523 | if (!MOUNT_ROOT_RDONLY) | 1523 | if (!MOUNT_ROOT_RDONLY) |
1524 | root_mountflags &= ~MS_RDONLY; | 1524 | root_mountflags &= ~MS_RDONLY; |
1525 | init_mm.start_code = (unsigned long) _text; | 1525 | init_mm.start_code = (unsigned long) _text; |
1526 | init_mm.end_code = (unsigned long) _etext; | 1526 | init_mm.end_code = (unsigned long) _etext; |
1527 | init_mm.end_data = (unsigned long) _edata; | 1527 | init_mm.end_data = (unsigned long) _edata; |
1528 | init_mm.brk = init_pg_tables_end + PAGE_OFFSET; | 1528 | init_mm.brk = init_pg_tables_end + PAGE_OFFSET; |
1529 | 1529 | ||
1530 | code_resource.start = virt_to_phys(_text); | 1530 | code_resource.start = virt_to_phys(_text); |
1531 | code_resource.end = virt_to_phys(_etext)-1; | 1531 | code_resource.end = virt_to_phys(_etext)-1; |
1532 | data_resource.start = virt_to_phys(_etext); | 1532 | data_resource.start = virt_to_phys(_etext); |
1533 | data_resource.end = virt_to_phys(_edata)-1; | 1533 | data_resource.end = virt_to_phys(_edata)-1; |
1534 | 1534 | ||
1535 | parse_cmdline_early(cmdline_p); | 1535 | parse_cmdline_early(cmdline_p); |
1536 | 1536 | ||
1537 | max_low_pfn = setup_memory(); | 1537 | max_low_pfn = setup_memory(); |
1538 | 1538 | ||
1539 | /* | 1539 | /* |
1540 | * NOTE: before this point _nobody_ is allowed to allocate | 1540 | * NOTE: before this point _nobody_ is allowed to allocate |
1541 | * any memory using the bootmem allocator. Although the | 1541 | * any memory using the bootmem allocator. Although the |
1542 | * allocator is now initialised, only the first 8Mb of the kernel | 1542 | * allocator is now initialised, only the first 8Mb of the kernel |
1543 | * virtual address space has been mapped. All allocations before | 1543 | * virtual address space has been mapped. All allocations before |
1544 | * paging_init() has completed must use the alloc_bootmem_low_pages() | 1544 | * paging_init() has completed must use the alloc_bootmem_low_pages() |
1545 | * variant (which allocates DMA'able memory) and care must be taken | 1545 | * variant (which allocates DMA'able memory) and care must be taken |
1546 | * not to exceed the 8Mb limit. | 1546 | * not to exceed the 8Mb limit. |
1547 | */ | 1547 | */ |
1548 | 1548 | ||
1549 | #ifdef CONFIG_SMP | 1549 | #ifdef CONFIG_SMP |
1550 | smp_alloc_memory(); /* AP processor realmode stacks in low memory*/ | 1550 | smp_alloc_memory(); /* AP processor realmode stacks in low memory*/ |
1551 | #endif | 1551 | #endif |
1552 | paging_init(); | 1552 | paging_init(); |
1553 | remapped_pgdat_init(); | 1553 | remapped_pgdat_init(); |
1554 | sparse_init(); | 1554 | sparse_init(); |
1555 | zone_sizes_init(); | 1555 | zone_sizes_init(); |
1556 | 1556 | ||
1557 | /* | 1557 | /* |
1558 | * NOTE: at this point the bootmem allocator is fully available. | 1558 | * NOTE: at this point the bootmem allocator is fully available. |
1559 | */ | 1559 | */ |
1560 | 1560 | ||
1561 | #ifdef CONFIG_EARLY_PRINTK | 1561 | #ifdef CONFIG_EARLY_PRINTK |
1562 | { | 1562 | { |
1563 | char *s = strstr(*cmdline_p, "earlyprintk="); | 1563 | char *s = strstr(*cmdline_p, "earlyprintk="); |
1564 | if (s) { | 1564 | if (s) { |
1565 | extern void setup_early_printk(char *); | 1565 | extern void setup_early_printk(char *); |
1566 | 1566 | ||
1567 | setup_early_printk(s); | 1567 | setup_early_printk(s); |
1568 | printk("early console enabled\n"); | 1568 | printk("early console enabled\n"); |
1569 | } | 1569 | } |
1570 | } | 1570 | } |
1571 | #endif | 1571 | #endif |
1572 | 1572 | ||
1573 | 1573 | ||
1574 | dmi_scan_machine(); | 1574 | dmi_scan_machine(); |
1575 | 1575 | ||
1576 | #ifdef CONFIG_X86_GENERICARCH | 1576 | #ifdef CONFIG_X86_GENERICARCH |
1577 | generic_apic_probe(*cmdline_p); | 1577 | generic_apic_probe(*cmdline_p); |
1578 | #endif | 1578 | #endif |
1579 | if (efi_enabled) | 1579 | if (efi_enabled) |
1580 | efi_map_memmap(); | 1580 | efi_map_memmap(); |
1581 | 1581 | ||
1582 | #ifdef CONFIG_ACPI_BOOT | 1582 | #ifdef CONFIG_ACPI_BOOT |
1583 | /* | 1583 | /* |
1584 | * Parse the ACPI tables for possible boot-time SMP configuration. | 1584 | * Parse the ACPI tables for possible boot-time SMP configuration. |
1585 | */ | 1585 | */ |
1586 | acpi_boot_table_init(); | 1586 | acpi_boot_table_init(); |
1587 | acpi_boot_init(); | 1587 | acpi_boot_init(); |
1588 | #endif | ||
1589 | 1588 | ||
1589 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC) | ||
1590 | if (def_to_bigsmp) | ||
1591 | printk(KERN_WARNING "More than 8 CPUs detected and " | ||
1592 | "CONFIG_X86_PC cannot handle it.\nUse " | ||
1593 | "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n"); | ||
1594 | #endif | ||
1595 | #endif | ||
1590 | #ifdef CONFIG_X86_LOCAL_APIC | 1596 | #ifdef CONFIG_X86_LOCAL_APIC |
1591 | if (smp_found_config) | 1597 | if (smp_found_config) |
1592 | get_smp_config(); | 1598 | get_smp_config(); |
1593 | #endif | 1599 | #endif |
1594 | 1600 | ||
1595 | register_memory(); | 1601 | register_memory(); |
1596 | 1602 | ||
1597 | #ifdef CONFIG_VT | 1603 | #ifdef CONFIG_VT |
1598 | #if defined(CONFIG_VGA_CONSOLE) | 1604 | #if defined(CONFIG_VGA_CONSOLE) |
1599 | if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) | 1605 | if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) |
1600 | conswitchp = &vga_con; | 1606 | conswitchp = &vga_con; |
1601 | #elif defined(CONFIG_DUMMY_CONSOLE) | 1607 | #elif defined(CONFIG_DUMMY_CONSOLE) |
1602 | conswitchp = &dummy_con; | 1608 | conswitchp = &dummy_con; |
1603 | #endif | 1609 | #endif |
1604 | #endif | 1610 | #endif |
1605 | } | 1611 | } |
1606 | 1612 | ||
1607 | #include "setup_arch_post.h" | 1613 | #include "setup_arch_post.h" |
1608 | /* | 1614 | /* |
1609 | * Local Variables: | 1615 | * Local Variables: |
1610 | * mode:c | 1616 | * mode:c |
1611 | * c-file-style:"k&r" | 1617 | * c-file-style:"k&r" |
1612 | * c-basic-offset:8 | 1618 | * c-basic-offset:8 |
1613 | * End: | 1619 | * End: |
1614 | */ | 1620 | */ |
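
The only functional change to setup.c in this commit is the hunk after acpi_boot_init(): a kernel built for plain CONFIG_X86_PC has no way to switch APIC drivers, so if the new def_to_bigsmp flag is set it can only warn and suggest CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP. The flag itself is raised during MP/ACPI CPU enumeration, which is outside this diff; the helper below is purely an illustrative sketch of the condition described in the changelog (more than 8 logical CPUs, Intel, xAPIC support), not the literal enumeration code.

#include <stdio.h>

/* Mirrors the APIC_XAPIC() macro this commit adds to apicdef.h. */
#define APIC_XAPIC(ver) ((ver) >= 0x14)

static int def_to_bigsmp;       /* read by the new setup_arch() hunk and by probe_bigsmp() */

/* Hypothetical helper name; only the condition comes from the changelog. */
static void check_bigsmp_default(int nr_logical_cpus, int is_intel,
                                 unsigned int apic_version)
{
        if (nr_logical_cpus > 8 && is_intel && APIC_XAPIC(apic_version))
                def_to_bigsmp = 1;
}

int main(void)
{
        check_bigsmp_default(16, 1, 0x14);      /* e.g. 16 logical CPUs on xAPIC hardware */
        if (def_to_bigsmp)
                printf("generic subarch would switch to bigsmp; a CONFIG_X86_PC\n"
                       "kernel can only print the warning added to setup_arch()\n");
        return 0;
}
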
arch/i386/mach-generic/bigsmp.c
1 | /* | 1 | /* |
2 | * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. | 2 | * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. |
3 | * Drives the local APIC in "clustered mode". | 3 | * Drives the local APIC in "clustered mode". |
4 | */ | 4 | */ |
5 | #define APIC_DEFINITION 1 | 5 | #define APIC_DEFINITION 1 |
6 | #include <linux/config.h> | 6 | #include <linux/config.h> |
7 | #include <linux/threads.h> | 7 | #include <linux/threads.h> |
8 | #include <linux/cpumask.h> | 8 | #include <linux/cpumask.h> |
9 | #include <asm/mpspec.h> | 9 | #include <asm/mpspec.h> |
10 | #include <asm/genapic.h> | 10 | #include <asm/genapic.h> |
11 | #include <asm/fixmap.h> | 11 | #include <asm/fixmap.h> |
12 | #include <asm/apicdef.h> | 12 | #include <asm/apicdef.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/dmi.h> | 16 | #include <linux/dmi.h> |
17 | #include <asm/mach-bigsmp/mach_apic.h> | 17 | #include <asm/mach-bigsmp/mach_apic.h> |
18 | #include <asm/mach-bigsmp/mach_apicdef.h> | 18 | #include <asm/mach-bigsmp/mach_apicdef.h> |
19 | #include <asm/mach-bigsmp/mach_ipi.h> | 19 | #include <asm/mach-bigsmp/mach_ipi.h> |
20 | #include <asm/mach-default/mach_mpparse.h> | 20 | #include <asm/mach-default/mach_mpparse.h> |
21 | 21 | ||
22 | static int dmi_bigsmp; /* can be set by dmi scanners */ | 22 | static int dmi_bigsmp; /* can be set by dmi scanners */ |
23 | 23 | ||
24 | static __init int hp_ht_bigsmp(struct dmi_system_id *d) | 24 | static __init int hp_ht_bigsmp(struct dmi_system_id *d) |
25 | { | 25 | { |
26 | #ifdef CONFIG_X86_GENERICARCH | 26 | #ifdef CONFIG_X86_GENERICARCH |
27 | printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); | 27 | printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); |
28 | dmi_bigsmp = 1; | 28 | dmi_bigsmp = 1; |
29 | #endif | 29 | #endif |
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
32 | 32 | ||
33 | 33 | ||
34 | static struct dmi_system_id __initdata bigsmp_dmi_table[] = { | 34 | static struct dmi_system_id __initdata bigsmp_dmi_table[] = { |
35 | { hp_ht_bigsmp, "HP ProLiant DL760 G2", { | 35 | { hp_ht_bigsmp, "HP ProLiant DL760 G2", { |
36 | DMI_MATCH(DMI_BIOS_VENDOR, "HP"), | 36 | DMI_MATCH(DMI_BIOS_VENDOR, "HP"), |
37 | DMI_MATCH(DMI_BIOS_VERSION, "P44-"), | 37 | DMI_MATCH(DMI_BIOS_VERSION, "P44-"), |
38 | }}, | 38 | }}, |
39 | 39 | ||
40 | { hp_ht_bigsmp, "HP ProLiant DL740", { | 40 | { hp_ht_bigsmp, "HP ProLiant DL740", { |
41 | DMI_MATCH(DMI_BIOS_VENDOR, "HP"), | 41 | DMI_MATCH(DMI_BIOS_VENDOR, "HP"), |
42 | DMI_MATCH(DMI_BIOS_VERSION, "P47-"), | 42 | DMI_MATCH(DMI_BIOS_VERSION, "P47-"), |
43 | }}, | 43 | }}, |
44 | { } | 44 | { } |
45 | }; | 45 | }; |
46 | 46 | ||
47 | 47 | ||
48 | static __init int probe_bigsmp(void) | 48 | static __init int probe_bigsmp(void) |
49 | { | 49 | { |
50 | dmi_check_system(bigsmp_dmi_table); | 50 | if (def_to_bigsmp) |
51 | dmi_bigsmp = 1; | ||
52 | else | ||
53 | dmi_check_system(bigsmp_dmi_table); | ||
51 | return dmi_bigsmp; | 54 | return dmi_bigsmp; |
52 | } | 55 | } |
53 | 56 | ||
54 | struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp); | 57 | struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp); |
55 | 58 |
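
With the change above, probe_bigsmp() no longer depends on the two HP ProLiant DMI entries once def_to_bigsmp has been set by CPU enumeration; the DMI whitelist is now only the fallback path. A user-space mock of the new decision order (the stub standing in for dmi_check_system() is an assumption for illustration):

#include <stdio.h>

static int def_to_bigsmp;       /* set elsewhere when >8 CPUs with xAPIC are found */
static int dmi_bigsmp;          /* set by the DMI whitelist scan */

/* Stub standing in for dmi_check_system(bigsmp_dmi_table). */
static void scan_dmi_whitelist(void)
{
        /* Would set dmi_bigsmp = 1 on the whitelisted HP ProLiant systems. */
}

static int probe_bigsmp(void)
{
        if (def_to_bigsmp)
                dmi_bigsmp = 1;         /* forced: skip the DMI whitelist */
        else
                scan_dmi_whitelist();   /* previous behaviour: DMI match only */
        return dmi_bigsmp;
}

int main(void)
{
        def_to_bigsmp = 1;              /* pretend enumeration found 16 xAPIC CPUs */
        printf("probe_bigsmp() -> %d\n", probe_bigsmp());
        return 0;
}
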
arch/i386/mach-generic/probe.c
1 | /* Copyright 2003 Andi Kleen, SuSE Labs. | 1 | /* Copyright 2003 Andi Kleen, SuSE Labs. |
2 | * Subject to the GNU Public License, v.2 | 2 | * Subject to the GNU Public License, v.2 |
3 | * | 3 | * |
4 | * Generic x86 APIC driver probe layer. | 4 | * Generic x86 APIC driver probe layer. |
5 | */ | 5 | */ |
6 | #include <linux/config.h> | 6 | #include <linux/config.h> |
7 | #include <linux/threads.h> | 7 | #include <linux/threads.h> |
8 | #include <linux/cpumask.h> | 8 | #include <linux/cpumask.h> |
9 | #include <linux/string.h> | 9 | #include <linux/string.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/ctype.h> | 11 | #include <linux/ctype.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <asm/fixmap.h> | 13 | #include <asm/fixmap.h> |
14 | #include <asm/mpspec.h> | 14 | #include <asm/mpspec.h> |
15 | #include <asm/apicdef.h> | 15 | #include <asm/apicdef.h> |
16 | #include <asm/genapic.h> | 16 | #include <asm/genapic.h> |
17 | 17 | ||
18 | extern struct genapic apic_summit; | 18 | extern struct genapic apic_summit; |
19 | extern struct genapic apic_bigsmp; | 19 | extern struct genapic apic_bigsmp; |
20 | extern struct genapic apic_es7000; | 20 | extern struct genapic apic_es7000; |
21 | extern struct genapic apic_default; | 21 | extern struct genapic apic_default; |
22 | 22 | ||
23 | struct genapic *genapic = &apic_default; | 23 | struct genapic *genapic = &apic_default; |
24 | 24 | ||
25 | struct genapic *apic_probe[] __initdata = { | 25 | struct genapic *apic_probe[] __initdata = { |
26 | &apic_summit, | 26 | &apic_summit, |
27 | &apic_bigsmp, | 27 | &apic_bigsmp, |
28 | &apic_es7000, | 28 | &apic_es7000, |
29 | &apic_default, /* must be last */ | 29 | &apic_default, /* must be last */ |
30 | NULL, | 30 | NULL, |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static int cmdline_apic; | ||
34 | |||
35 | void __init generic_bigsmp_probe(void) | ||
36 | { | ||
37 | /* | ||
38 | * This routine is used to switch to bigsmp mode when | ||
39 | * - There is no apic= option specified by the user | ||
40 | * - generic_apic_probe() has chosen apic_default as the sub_arch | ||
41 | * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support | ||
42 | */ | ||
43 | |||
44 | if (!cmdline_apic && genapic == &apic_default) | ||
45 | if (apic_bigsmp.probe()) { | ||
46 | genapic = &apic_bigsmp; | ||
47 | printk(KERN_INFO "Overriding APIC driver with %s\n", | ||
48 | genapic->name); | ||
49 | } | ||
50 | } | ||
51 | |||
33 | void __init generic_apic_probe(char *command_line) | 52 | void __init generic_apic_probe(char *command_line) |
34 | { | 53 | { |
35 | char *s; | 54 | char *s; |
36 | int i; | 55 | int i; |
37 | int changed = 0; | 56 | int changed = 0; |
38 | 57 | ||
39 | s = strstr(command_line, "apic="); | 58 | s = strstr(command_line, "apic="); |
40 | if (s && (s == command_line || isspace(s[-1]))) { | 59 | if (s && (s == command_line || isspace(s[-1]))) { |
41 | char *p = strchr(s, ' '), old; | 60 | char *p = strchr(s, ' '), old; |
42 | if (!p) | 61 | if (!p) |
43 | p = strchr(s, '\0'); | 62 | p = strchr(s, '\0'); |
44 | old = *p; | 63 | old = *p; |
45 | *p = 0; | 64 | *p = 0; |
46 | for (i = 0; !changed && apic_probe[i]; i++) { | 65 | for (i = 0; !changed && apic_probe[i]; i++) { |
47 | if (!strcmp(apic_probe[i]->name, s+5)) { | 66 | if (!strcmp(apic_probe[i]->name, s+5)) { |
48 | changed = 1; | 67 | changed = 1; |
49 | genapic = apic_probe[i]; | 68 | genapic = apic_probe[i]; |
50 | } | 69 | } |
51 | } | 70 | } |
52 | if (!changed) | 71 | if (!changed) |
53 | printk(KERN_ERR "Unknown genapic `%s' specified.\n", s); | 72 | printk(KERN_ERR "Unknown genapic `%s' specified.\n", s); |
54 | *p = old; | 73 | *p = old; |
74 | cmdline_apic = changed; | ||
55 | } | 75 | } |
56 | for (i = 0; !changed && apic_probe[i]; i++) { | 76 | for (i = 0; !changed && apic_probe[i]; i++) { |
57 | if (apic_probe[i]->probe()) { | 77 | if (apic_probe[i]->probe()) { |
58 | changed = 1; | 78 | changed = 1; |
59 | genapic = apic_probe[i]; | 79 | genapic = apic_probe[i]; |
60 | } | 80 | } |
61 | } | 81 | } |
62 | /* Not visible without early console */ | 82 | /* Not visible without early console */ |
63 | if (!changed) | 83 | if (!changed) |
64 | panic("Didn't find an APIC driver"); | 84 | panic("Didn't find an APIC driver"); |
65 | 85 | ||
66 | printk(KERN_INFO "Using APIC driver %s\n", genapic->name); | 86 | printk(KERN_INFO "Using APIC driver %s\n", genapic->name); |
67 | } | 87 | } |
68 | 88 | ||
69 | /* These functions can switch the APIC even after the initial ->probe() */ | 89 | /* These functions can switch the APIC even after the initial ->probe() */ |
70 | 90 | ||
71 | int __init mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) | 91 | int __init mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid) |
72 | { | 92 | { |
73 | int i; | 93 | int i; |
74 | for (i = 0; apic_probe[i]; ++i) { | 94 | for (i = 0; apic_probe[i]; ++i) { |
75 | if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) { | 95 | if (apic_probe[i]->mps_oem_check(mpc,oem,productid)) { |
76 | genapic = apic_probe[i]; | 96 | genapic = apic_probe[i]; |
77 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", | 97 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", |
78 | genapic->name); | 98 | genapic->name); |
79 | return 1; | 99 | return 1; |
80 | } | 100 | } |
81 | } | 101 | } |
82 | return 0; | 102 | return 0; |
83 | } | 103 | } |
84 | 104 | ||
85 | int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 105 | int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
86 | { | 106 | { |
87 | int i; | 107 | int i; |
88 | for (i = 0; apic_probe[i]; ++i) { | 108 | for (i = 0; apic_probe[i]; ++i) { |
89 | if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { | 109 | if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) { |
90 | genapic = apic_probe[i]; | 110 | genapic = apic_probe[i]; |
91 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", | 111 | printk(KERN_INFO "Switched to APIC driver `%s'.\n", |
92 | genapic->name); | 112 | genapic->name); |
93 | return 1; | 113 | return 1; |
94 | } | 114 | } |
95 | } | 115 | } |
96 | return 0; | 116 | return 0; |
97 | } | 117 | } |
98 | 118 | ||
99 | int hard_smp_processor_id(void) | 119 | int hard_smp_processor_id(void) |
100 | { | 120 | { |
101 | return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID)); | 121 | return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID)); |
102 | } | 122 | } |
103 | 123 |
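
The new cmdline_apic flag records whether the user forced a driver with apic=..., and generic_bigsmp_probe() only overrides a driver that generic_apic_probe() picked automatically, i.e. when genapic is still apic_default and no apic= option was seen. Its call site is outside this diff; presumably it runs once the CPU count is known. A condensed, stand-alone mock of that guard:

#include <stdio.h>

struct genapic { const char *name; int (*probe)(void); };

static int def_to_bigsmp;

static int probe_default(void) { return 1; }
static int probe_bigsmp(void)  { return def_to_bigsmp; }

static struct genapic apic_default = { "default", probe_default };
static struct genapic apic_bigsmp  = { "bigsmp",  probe_bigsmp  };

static struct genapic *genapic = &apic_default;
static int cmdline_apic;        /* 1 if the user passed apic=<name> */

static void generic_bigsmp_probe(void)
{
        /* Only override an automatically chosen apic_default. */
        if (!cmdline_apic && genapic == &apic_default && apic_bigsmp.probe()) {
                genapic = &apic_bigsmp;
                printf("Overriding APIC driver with %s\n", genapic->name);
        }
}

int main(void)
{
        def_to_bigsmp = 1;              /* pretend enumeration found more than 8 CPUs */
        generic_bigsmp_probe();
        printf("Using APIC driver %s\n", genapic->name);
        return 0;
}
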
include/asm-i386/apicdef.h
1 | #ifndef __ASM_APICDEF_H | 1 | #ifndef __ASM_APICDEF_H |
2 | #define __ASM_APICDEF_H | 2 | #define __ASM_APICDEF_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) | 5 | * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) |
6 | * | 6 | * |
7 | * Alan Cox <Alan.Cox@linux.org>, 1995. | 7 | * Alan Cox <Alan.Cox@linux.org>, 1995. |
8 | * Ingo Molnar <mingo@redhat.com>, 1999, 2000 | 8 | * Ingo Molnar <mingo@redhat.com>, 1999, 2000 |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 | 11 | #define APIC_DEFAULT_PHYS_BASE 0xfee00000 |
12 | 12 | ||
13 | #define APIC_ID 0x20 | 13 | #define APIC_ID 0x20 |
14 | #define APIC_LVR 0x30 | 14 | #define APIC_LVR 0x30 |
15 | #define APIC_LVR_MASK 0xFF00FF | 15 | #define APIC_LVR_MASK 0xFF00FF |
16 | #define GET_APIC_VERSION(x) ((x)&0xFF) | 16 | #define GET_APIC_VERSION(x) ((x)&0xFF) |
17 | #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) | 17 | #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) |
18 | #define APIC_INTEGRATED(x) ((x)&0xF0) | 18 | #define APIC_INTEGRATED(x) ((x)&0xF0) |
19 | #define APIC_XAPIC(x) ((x) >= 0x14) | ||
19 | #define APIC_TASKPRI 0x80 | 20 | #define APIC_TASKPRI 0x80 |
20 | #define APIC_TPRI_MASK 0xFF | 21 | #define APIC_TPRI_MASK 0xFF |
21 | #define APIC_ARBPRI 0x90 | 22 | #define APIC_ARBPRI 0x90 |
22 | #define APIC_ARBPRI_MASK 0xFF | 23 | #define APIC_ARBPRI_MASK 0xFF |
23 | #define APIC_PROCPRI 0xA0 | 24 | #define APIC_PROCPRI 0xA0 |
24 | #define APIC_EOI 0xB0 | 25 | #define APIC_EOI 0xB0 |
25 | #define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ | 26 | #define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ |
26 | #define APIC_RRR 0xC0 | 27 | #define APIC_RRR 0xC0 |
27 | #define APIC_LDR 0xD0 | 28 | #define APIC_LDR 0xD0 |
28 | #define APIC_LDR_MASK (0xFF<<24) | 29 | #define APIC_LDR_MASK (0xFF<<24) |
29 | #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF) | 30 | #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF) |
30 | #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) | 31 | #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) |
31 | #define APIC_ALL_CPUS 0xFF | 32 | #define APIC_ALL_CPUS 0xFF |
32 | #define APIC_DFR 0xE0 | 33 | #define APIC_DFR 0xE0 |
33 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul | 34 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul |
34 | #define APIC_DFR_FLAT 0xFFFFFFFFul | 35 | #define APIC_DFR_FLAT 0xFFFFFFFFul |
35 | #define APIC_SPIV 0xF0 | 36 | #define APIC_SPIV 0xF0 |
36 | #define APIC_SPIV_FOCUS_DISABLED (1<<9) | 37 | #define APIC_SPIV_FOCUS_DISABLED (1<<9) |
37 | #define APIC_SPIV_APIC_ENABLED (1<<8) | 38 | #define APIC_SPIV_APIC_ENABLED (1<<8) |
38 | #define APIC_ISR 0x100 | 39 | #define APIC_ISR 0x100 |
39 | #define APIC_TMR 0x180 | 40 | #define APIC_TMR 0x180 |
40 | #define APIC_IRR 0x200 | 41 | #define APIC_IRR 0x200 |
41 | #define APIC_ESR 0x280 | 42 | #define APIC_ESR 0x280 |
42 | #define APIC_ESR_SEND_CS 0x00001 | 43 | #define APIC_ESR_SEND_CS 0x00001 |
43 | #define APIC_ESR_RECV_CS 0x00002 | 44 | #define APIC_ESR_RECV_CS 0x00002 |
44 | #define APIC_ESR_SEND_ACC 0x00004 | 45 | #define APIC_ESR_SEND_ACC 0x00004 |
45 | #define APIC_ESR_RECV_ACC 0x00008 | 46 | #define APIC_ESR_RECV_ACC 0x00008 |
46 | #define APIC_ESR_SENDILL 0x00020 | 47 | #define APIC_ESR_SENDILL 0x00020 |
47 | #define APIC_ESR_RECVILL 0x00040 | 48 | #define APIC_ESR_RECVILL 0x00040 |
48 | #define APIC_ESR_ILLREGA 0x00080 | 49 | #define APIC_ESR_ILLREGA 0x00080 |
49 | #define APIC_ICR 0x300 | 50 | #define APIC_ICR 0x300 |
50 | #define APIC_DEST_SELF 0x40000 | 51 | #define APIC_DEST_SELF 0x40000 |
51 | #define APIC_DEST_ALLINC 0x80000 | 52 | #define APIC_DEST_ALLINC 0x80000 |
52 | #define APIC_DEST_ALLBUT 0xC0000 | 53 | #define APIC_DEST_ALLBUT 0xC0000 |
53 | #define APIC_ICR_RR_MASK 0x30000 | 54 | #define APIC_ICR_RR_MASK 0x30000 |
54 | #define APIC_ICR_RR_INVALID 0x00000 | 55 | #define APIC_ICR_RR_INVALID 0x00000 |
55 | #define APIC_ICR_RR_INPROG 0x10000 | 56 | #define APIC_ICR_RR_INPROG 0x10000 |
56 | #define APIC_ICR_RR_VALID 0x20000 | 57 | #define APIC_ICR_RR_VALID 0x20000 |
57 | #define APIC_INT_LEVELTRIG 0x08000 | 58 | #define APIC_INT_LEVELTRIG 0x08000 |
58 | #define APIC_INT_ASSERT 0x04000 | 59 | #define APIC_INT_ASSERT 0x04000 |
59 | #define APIC_ICR_BUSY 0x01000 | 60 | #define APIC_ICR_BUSY 0x01000 |
60 | #define APIC_DEST_LOGICAL 0x00800 | 61 | #define APIC_DEST_LOGICAL 0x00800 |
61 | #define APIC_DM_FIXED 0x00000 | 62 | #define APIC_DM_FIXED 0x00000 |
62 | #define APIC_DM_LOWEST 0x00100 | 63 | #define APIC_DM_LOWEST 0x00100 |
63 | #define APIC_DM_SMI 0x00200 | 64 | #define APIC_DM_SMI 0x00200 |
64 | #define APIC_DM_REMRD 0x00300 | 65 | #define APIC_DM_REMRD 0x00300 |
65 | #define APIC_DM_NMI 0x00400 | 66 | #define APIC_DM_NMI 0x00400 |
66 | #define APIC_DM_INIT 0x00500 | 67 | #define APIC_DM_INIT 0x00500 |
67 | #define APIC_DM_STARTUP 0x00600 | 68 | #define APIC_DM_STARTUP 0x00600 |
68 | #define APIC_DM_EXTINT 0x00700 | 69 | #define APIC_DM_EXTINT 0x00700 |
69 | #define APIC_VECTOR_MASK 0x000FF | 70 | #define APIC_VECTOR_MASK 0x000FF |
70 | #define APIC_ICR2 0x310 | 71 | #define APIC_ICR2 0x310 |
71 | #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) | 72 | #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) |
72 | #define SET_APIC_DEST_FIELD(x) ((x)<<24) | 73 | #define SET_APIC_DEST_FIELD(x) ((x)<<24) |
73 | #define APIC_LVTT 0x320 | 74 | #define APIC_LVTT 0x320 |
74 | #define APIC_LVTTHMR 0x330 | 75 | #define APIC_LVTTHMR 0x330 |
75 | #define APIC_LVTPC 0x340 | 76 | #define APIC_LVTPC 0x340 |
76 | #define APIC_LVT0 0x350 | 77 | #define APIC_LVT0 0x350 |
77 | #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) | 78 | #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) |
78 | #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) | 79 | #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) |
79 | #define SET_APIC_TIMER_BASE(x) (((x)<<18)) | 80 | #define SET_APIC_TIMER_BASE(x) (((x)<<18)) |
80 | #define APIC_TIMER_BASE_CLKIN 0x0 | 81 | #define APIC_TIMER_BASE_CLKIN 0x0 |
81 | #define APIC_TIMER_BASE_TMBASE 0x1 | 82 | #define APIC_TIMER_BASE_TMBASE 0x1 |
82 | #define APIC_TIMER_BASE_DIV 0x2 | 83 | #define APIC_TIMER_BASE_DIV 0x2 |
83 | #define APIC_LVT_TIMER_PERIODIC (1<<17) | 84 | #define APIC_LVT_TIMER_PERIODIC (1<<17) |
84 | #define APIC_LVT_MASKED (1<<16) | 85 | #define APIC_LVT_MASKED (1<<16) |
85 | #define APIC_LVT_LEVEL_TRIGGER (1<<15) | 86 | #define APIC_LVT_LEVEL_TRIGGER (1<<15) |
86 | #define APIC_LVT_REMOTE_IRR (1<<14) | 87 | #define APIC_LVT_REMOTE_IRR (1<<14) |
87 | #define APIC_INPUT_POLARITY (1<<13) | 88 | #define APIC_INPUT_POLARITY (1<<13) |
88 | #define APIC_SEND_PENDING (1<<12) | 89 | #define APIC_SEND_PENDING (1<<12) |
89 | #define APIC_MODE_MASK 0x700 | 90 | #define APIC_MODE_MASK 0x700 |
90 | #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) | 91 | #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) |
91 | #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) | 92 | #define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) |
92 | #define APIC_MODE_FIXED 0x0 | 93 | #define APIC_MODE_FIXED 0x0 |
93 | #define APIC_MODE_NMI 0x4 | 94 | #define APIC_MODE_NMI 0x4 |
94 | #define APIC_MODE_EXTINT 0x7 | 95 | #define APIC_MODE_EXTINT 0x7 |
95 | #define APIC_LVT1 0x360 | 96 | #define APIC_LVT1 0x360 |
96 | #define APIC_LVTERR 0x370 | 97 | #define APIC_LVTERR 0x370 |
97 | #define APIC_TMICT 0x380 | 98 | #define APIC_TMICT 0x380 |
98 | #define APIC_TMCCT 0x390 | 99 | #define APIC_TMCCT 0x390 |
99 | #define APIC_TDCR 0x3E0 | 100 | #define APIC_TDCR 0x3E0 |
100 | #define APIC_TDR_DIV_TMBASE (1<<2) | 101 | #define APIC_TDR_DIV_TMBASE (1<<2) |
101 | #define APIC_TDR_DIV_1 0xB | 102 | #define APIC_TDR_DIV_1 0xB |
102 | #define APIC_TDR_DIV_2 0x0 | 103 | #define APIC_TDR_DIV_2 0x0 |
103 | #define APIC_TDR_DIV_4 0x1 | 104 | #define APIC_TDR_DIV_4 0x1 |
104 | #define APIC_TDR_DIV_8 0x2 | 105 | #define APIC_TDR_DIV_8 0x2 |
105 | #define APIC_TDR_DIV_16 0x3 | 106 | #define APIC_TDR_DIV_16 0x3 |
106 | #define APIC_TDR_DIV_32 0x8 | 107 | #define APIC_TDR_DIV_32 0x8 |
107 | #define APIC_TDR_DIV_64 0x9 | 108 | #define APIC_TDR_DIV_64 0x9 |
108 | #define APIC_TDR_DIV_128 0xA | 109 | #define APIC_TDR_DIV_128 0xA |
109 | 110 | ||
110 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) | 111 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) |
111 | 112 | ||
112 | #define MAX_IO_APICS 64 | 113 | #define MAX_IO_APICS 64 |
113 | 114 | ||
114 | /* | 115 | /* |
115 | * the local APIC register structure, memory mapped. Not terribly well | 116 | * the local APIC register structure, memory mapped. Not terribly well |
116 | * tested, but we might eventually use this one in the future - the | 117 | * tested, but we might eventually use this one in the future - the |
117 | * problem why we cannot use it right now is the P5 APIC, it has an | 118 | * problem why we cannot use it right now is the P5 APIC, it has an |
118 | * errata which cannot take 8-bit reads and writes, only 32-bit ones ... | 119 | * errata which cannot take 8-bit reads and writes, only 32-bit ones ... |
119 | */ | 120 | */ |
120 | #define u32 unsigned int | 121 | #define u32 unsigned int |
121 | 122 | ||
122 | #define lapic ((volatile struct local_apic *)APIC_BASE) | 123 | #define lapic ((volatile struct local_apic *)APIC_BASE) |
123 | 124 | ||
124 | struct local_apic { | 125 | struct local_apic { |
125 | 126 | ||
126 | /*000*/ struct { u32 __reserved[4]; } __reserved_01; | 127 | /*000*/ struct { u32 __reserved[4]; } __reserved_01; |
127 | 128 | ||
128 | /*010*/ struct { u32 __reserved[4]; } __reserved_02; | 129 | /*010*/ struct { u32 __reserved[4]; } __reserved_02; |
129 | 130 | ||
130 | /*020*/ struct { /* APIC ID Register */ | 131 | /*020*/ struct { /* APIC ID Register */ |
131 | u32 __reserved_1 : 24, | 132 | u32 __reserved_1 : 24, |
132 | phys_apic_id : 4, | 133 | phys_apic_id : 4, |
133 | __reserved_2 : 4; | 134 | __reserved_2 : 4; |
134 | u32 __reserved[3]; | 135 | u32 __reserved[3]; |
135 | } id; | 136 | } id; |
136 | 137 | ||
137 | /*030*/ const | 138 | /*030*/ const |
138 | struct { /* APIC Version Register */ | 139 | struct { /* APIC Version Register */ |
139 | u32 version : 8, | 140 | u32 version : 8, |
140 | __reserved_1 : 8, | 141 | __reserved_1 : 8, |
141 | max_lvt : 8, | 142 | max_lvt : 8, |
142 | __reserved_2 : 8; | 143 | __reserved_2 : 8; |
143 | u32 __reserved[3]; | 144 | u32 __reserved[3]; |
144 | } version; | 145 | } version; |
145 | 146 | ||
146 | /*040*/ struct { u32 __reserved[4]; } __reserved_03; | 147 | /*040*/ struct { u32 __reserved[4]; } __reserved_03; |
147 | 148 | ||
148 | /*050*/ struct { u32 __reserved[4]; } __reserved_04; | 149 | /*050*/ struct { u32 __reserved[4]; } __reserved_04; |
149 | 150 | ||
150 | /*060*/ struct { u32 __reserved[4]; } __reserved_05; | 151 | /*060*/ struct { u32 __reserved[4]; } __reserved_05; |
151 | 152 | ||
152 | /*070*/ struct { u32 __reserved[4]; } __reserved_06; | 153 | /*070*/ struct { u32 __reserved[4]; } __reserved_06; |
153 | 154 | ||
154 | /*080*/ struct { /* Task Priority Register */ | 155 | /*080*/ struct { /* Task Priority Register */ |
155 | u32 priority : 8, | 156 | u32 priority : 8, |
156 | __reserved_1 : 24; | 157 | __reserved_1 : 24; |
157 | u32 __reserved_2[3]; | 158 | u32 __reserved_2[3]; |
158 | } tpr; | 159 | } tpr; |
159 | 160 | ||
160 | /*090*/ const | 161 | /*090*/ const |
161 | struct { /* Arbitration Priority Register */ | 162 | struct { /* Arbitration Priority Register */ |
162 | u32 priority : 8, | 163 | u32 priority : 8, |
163 | __reserved_1 : 24; | 164 | __reserved_1 : 24; |
164 | u32 __reserved_2[3]; | 165 | u32 __reserved_2[3]; |
165 | } apr; | 166 | } apr; |
166 | 167 | ||
167 | /*0A0*/ const | 168 | /*0A0*/ const |
168 | struct { /* Processor Priority Register */ | 169 | struct { /* Processor Priority Register */ |
169 | u32 priority : 8, | 170 | u32 priority : 8, |
170 | __reserved_1 : 24; | 171 | __reserved_1 : 24; |
171 | u32 __reserved_2[3]; | 172 | u32 __reserved_2[3]; |
172 | } ppr; | 173 | } ppr; |
173 | 174 | ||
174 | /*0B0*/ struct { /* End Of Interrupt Register */ | 175 | /*0B0*/ struct { /* End Of Interrupt Register */ |
175 | u32 eoi; | 176 | u32 eoi; |
176 | u32 __reserved[3]; | 177 | u32 __reserved[3]; |
177 | } eoi; | 178 | } eoi; |
178 | 179 | ||
179 | /*0C0*/ struct { u32 __reserved[4]; } __reserved_07; | 180 | /*0C0*/ struct { u32 __reserved[4]; } __reserved_07; |
180 | 181 | ||
181 | /*0D0*/ struct { /* Logical Destination Register */ | 182 | /*0D0*/ struct { /* Logical Destination Register */ |
182 | u32 __reserved_1 : 24, | 183 | u32 __reserved_1 : 24, |
183 | logical_dest : 8; | 184 | logical_dest : 8; |
184 | u32 __reserved_2[3]; | 185 | u32 __reserved_2[3]; |
185 | } ldr; | 186 | } ldr; |
186 | 187 | ||
187 | /*0E0*/ struct { /* Destination Format Register */ | 188 | /*0E0*/ struct { /* Destination Format Register */ |
188 | u32 __reserved_1 : 28, | 189 | u32 __reserved_1 : 28, |
189 | model : 4; | 190 | model : 4; |
190 | u32 __reserved_2[3]; | 191 | u32 __reserved_2[3]; |
191 | } dfr; | 192 | } dfr; |
192 | 193 | ||
193 | /*0F0*/ struct { /* Spurious Interrupt Vector Register */ | 194 | /*0F0*/ struct { /* Spurious Interrupt Vector Register */ |
194 | u32 spurious_vector : 8, | 195 | u32 spurious_vector : 8, |
195 | apic_enabled : 1, | 196 | apic_enabled : 1, |
196 | focus_cpu : 1, | 197 | focus_cpu : 1, |
197 | __reserved_2 : 22; | 198 | __reserved_2 : 22; |
198 | u32 __reserved_3[3]; | 199 | u32 __reserved_3[3]; |
199 | } svr; | 200 | } svr; |
200 | 201 | ||
201 | /*100*/ struct { /* In Service Register */ | 202 | /*100*/ struct { /* In Service Register */ |
202 | /*170*/ u32 bitfield; | 203 | /*170*/ u32 bitfield; |
203 | u32 __reserved[3]; | 204 | u32 __reserved[3]; |
204 | } isr [8]; | 205 | } isr [8]; |
205 | 206 | ||
206 | /*180*/ struct { /* Trigger Mode Register */ | 207 | /*180*/ struct { /* Trigger Mode Register */ |
207 | /*1F0*/ u32 bitfield; | 208 | /*1F0*/ u32 bitfield; |
208 | u32 __reserved[3]; | 209 | u32 __reserved[3]; |
209 | } tmr [8]; | 210 | } tmr [8]; |
210 | 211 | ||
211 | /*200*/ struct { /* Interrupt Request Register */ | 212 | /*200*/ struct { /* Interrupt Request Register */ |
212 | /*270*/ u32 bitfield; | 213 | /*270*/ u32 bitfield; |
213 | u32 __reserved[3]; | 214 | u32 __reserved[3]; |
214 | } irr [8]; | 215 | } irr [8]; |
215 | 216 | ||
216 | /*280*/ union { /* Error Status Register */ | 217 | /*280*/ union { /* Error Status Register */ |
217 | struct { | 218 | struct { |
218 | u32 send_cs_error : 1, | 219 | u32 send_cs_error : 1, |
219 | receive_cs_error : 1, | 220 | receive_cs_error : 1, |
220 | send_accept_error : 1, | 221 | send_accept_error : 1, |
221 | receive_accept_error : 1, | 222 | receive_accept_error : 1, |
222 | __reserved_1 : 1, | 223 | __reserved_1 : 1, |
223 | send_illegal_vector : 1, | 224 | send_illegal_vector : 1, |
224 | receive_illegal_vector : 1, | 225 | receive_illegal_vector : 1, |
225 | illegal_register_address : 1, | 226 | illegal_register_address : 1, |
226 | __reserved_2 : 24; | 227 | __reserved_2 : 24; |
227 | u32 __reserved_3[3]; | 228 | u32 __reserved_3[3]; |
228 | } error_bits; | 229 | } error_bits; |
229 | struct { | 230 | struct { |
230 | u32 errors; | 231 | u32 errors; |
231 | u32 __reserved_3[3]; | 232 | u32 __reserved_3[3]; |
232 | } all_errors; | 233 | } all_errors; |
233 | } esr; | 234 | } esr; |
234 | 235 | ||
235 | /*290*/ struct { u32 __reserved[4]; } __reserved_08; | 236 | /*290*/ struct { u32 __reserved[4]; } __reserved_08; |
236 | 237 | ||
237 | /*2A0*/ struct { u32 __reserved[4]; } __reserved_09; | 238 | /*2A0*/ struct { u32 __reserved[4]; } __reserved_09; |
238 | 239 | ||
239 | /*2B0*/ struct { u32 __reserved[4]; } __reserved_10; | 240 | /*2B0*/ struct { u32 __reserved[4]; } __reserved_10; |
240 | 241 | ||
241 | /*2C0*/ struct { u32 __reserved[4]; } __reserved_11; | 242 | /*2C0*/ struct { u32 __reserved[4]; } __reserved_11; |
242 | 243 | ||
243 | /*2D0*/ struct { u32 __reserved[4]; } __reserved_12; | 244 | /*2D0*/ struct { u32 __reserved[4]; } __reserved_12; |
244 | 245 | ||
245 | /*2E0*/ struct { u32 __reserved[4]; } __reserved_13; | 246 | /*2E0*/ struct { u32 __reserved[4]; } __reserved_13; |
246 | 247 | ||
247 | /*2F0*/ struct { u32 __reserved[4]; } __reserved_14; | 248 | /*2F0*/ struct { u32 __reserved[4]; } __reserved_14; |
248 | 249 | ||
249 | /*300*/ struct { /* Interrupt Command Register 1 */ | 250 | /*300*/ struct { /* Interrupt Command Register 1 */ |
250 | u32 vector : 8, | 251 | u32 vector : 8, |
251 | delivery_mode : 3, | 252 | delivery_mode : 3, |
252 | destination_mode : 1, | 253 | destination_mode : 1, |
253 | delivery_status : 1, | 254 | delivery_status : 1, |
254 | __reserved_1 : 1, | 255 | __reserved_1 : 1, |
255 | level : 1, | 256 | level : 1, |
256 | trigger : 1, | 257 | trigger : 1, |
257 | __reserved_2 : 2, | 258 | __reserved_2 : 2, |
258 | shorthand : 2, | 259 | shorthand : 2, |
259 | __reserved_3 : 12; | 260 | __reserved_3 : 12; |
260 | u32 __reserved_4[3]; | 261 | u32 __reserved_4[3]; |
261 | } icr1; | 262 | } icr1; |
262 | 263 | ||
263 | /*310*/ struct { /* Interrupt Command Register 2 */ | 264 | /*310*/ struct { /* Interrupt Command Register 2 */ |
264 | union { | 265 | union { |
265 | u32 __reserved_1 : 24, | 266 | u32 __reserved_1 : 24, |
266 | phys_dest : 4, | 267 | phys_dest : 4, |
267 | __reserved_2 : 4; | 268 | __reserved_2 : 4; |
268 | u32 __reserved_3 : 24, | 269 | u32 __reserved_3 : 24, |
269 | logical_dest : 8; | 270 | logical_dest : 8; |
270 | } dest; | 271 | } dest; |
271 | u32 __reserved_4[3]; | 272 | u32 __reserved_4[3]; |
272 | } icr2; | 273 | } icr2; |
273 | 274 | ||
274 | /*320*/ struct { /* LVT - Timer */ | 275 | /*320*/ struct { /* LVT - Timer */ |
275 | u32 vector : 8, | 276 | u32 vector : 8, |
276 | __reserved_1 : 4, | 277 | __reserved_1 : 4, |
277 | delivery_status : 1, | 278 | delivery_status : 1, |
278 | __reserved_2 : 3, | 279 | __reserved_2 : 3, |
279 | mask : 1, | 280 | mask : 1, |
280 | timer_mode : 1, | 281 | timer_mode : 1, |
281 | __reserved_3 : 14; | 282 | __reserved_3 : 14; |
282 | u32 __reserved_4[3]; | 283 | u32 __reserved_4[3]; |
283 | } lvt_timer; | 284 | } lvt_timer; |
284 | 285 | ||
285 | /*330*/ struct { /* LVT - Thermal Sensor */ | 286 | /*330*/ struct { /* LVT - Thermal Sensor */ |
286 | u32 vector : 8, | 287 | u32 vector : 8, |
287 | delivery_mode : 3, | 288 | delivery_mode : 3, |
288 | __reserved_1 : 1, | 289 | __reserved_1 : 1, |
289 | delivery_status : 1, | 290 | delivery_status : 1, |
290 | __reserved_2 : 3, | 291 | __reserved_2 : 3, |
291 | mask : 1, | 292 | mask : 1, |
292 | __reserved_3 : 15; | 293 | __reserved_3 : 15; |
293 | u32 __reserved_4[3]; | 294 | u32 __reserved_4[3]; |
294 | } lvt_thermal; | 295 | } lvt_thermal; |
295 | 296 | ||
296 | /*340*/ struct { /* LVT - Performance Counter */ | 297 | /*340*/ struct { /* LVT - Performance Counter */ |
297 | u32 vector : 8, | 298 | u32 vector : 8, |
298 | delivery_mode : 3, | 299 | delivery_mode : 3, |
299 | __reserved_1 : 1, | 300 | __reserved_1 : 1, |
300 | delivery_status : 1, | 301 | delivery_status : 1, |
301 | __reserved_2 : 3, | 302 | __reserved_2 : 3, |
302 | mask : 1, | 303 | mask : 1, |
303 | __reserved_3 : 15; | 304 | __reserved_3 : 15; |
304 | u32 __reserved_4[3]; | 305 | u32 __reserved_4[3]; |
305 | } lvt_pc; | 306 | } lvt_pc; |
306 | 307 | ||
307 | /*350*/ struct { /* LVT - LINT0 */ | 308 | /*350*/ struct { /* LVT - LINT0 */ |
308 | u32 vector : 8, | 309 | u32 vector : 8, |
309 | delivery_mode : 3, | 310 | delivery_mode : 3, |
310 | __reserved_1 : 1, | 311 | __reserved_1 : 1, |
311 | delivery_status : 1, | 312 | delivery_status : 1, |
312 | polarity : 1, | 313 | polarity : 1, |
313 | remote_irr : 1, | 314 | remote_irr : 1, |
314 | trigger : 1, | 315 | trigger : 1, |
315 | mask : 1, | 316 | mask : 1, |
316 | __reserved_2 : 15; | 317 | __reserved_2 : 15; |
317 | u32 __reserved_3[3]; | 318 | u32 __reserved_3[3]; |
318 | } lvt_lint0; | 319 | } lvt_lint0; |
319 | 320 | ||
320 | /*360*/ struct { /* LVT - LINT1 */ | 321 | /*360*/ struct { /* LVT - LINT1 */ |
321 | u32 vector : 8, | 322 | u32 vector : 8, |
322 | delivery_mode : 3, | 323 | delivery_mode : 3, |
323 | __reserved_1 : 1, | 324 | __reserved_1 : 1, |
324 | delivery_status : 1, | 325 | delivery_status : 1, |
325 | polarity : 1, | 326 | polarity : 1, |
326 | remote_irr : 1, | 327 | remote_irr : 1, |
327 | trigger : 1, | 328 | trigger : 1, |
328 | mask : 1, | 329 | mask : 1, |
329 | __reserved_2 : 15; | 330 | __reserved_2 : 15; |
330 | u32 __reserved_3[3]; | 331 | u32 __reserved_3[3]; |
331 | } lvt_lint1; | 332 | } lvt_lint1; |
332 | 333 | ||
333 | /*370*/ struct { /* LVT - Error */ | 334 | /*370*/ struct { /* LVT - Error */ |
334 | u32 vector : 8, | 335 | u32 vector : 8, |
335 | __reserved_1 : 4, | 336 | __reserved_1 : 4, |
336 | delivery_status : 1, | 337 | delivery_status : 1, |
337 | __reserved_2 : 3, | 338 | __reserved_2 : 3, |
338 | mask : 1, | 339 | mask : 1, |
339 | __reserved_3 : 15; | 340 | __reserved_3 : 15; |
340 | u32 __reserved_4[3]; | 341 | u32 __reserved_4[3]; |
341 | } lvt_error; | 342 | } lvt_error; |
342 | 343 | ||
343 | /*380*/ struct { /* Timer Initial Count Register */ | 344 | /*380*/ struct { /* Timer Initial Count Register */ |
344 | u32 initial_count; | 345 | u32 initial_count; |
345 | u32 __reserved_2[3]; | 346 | u32 __reserved_2[3]; |
346 | } timer_icr; | 347 | } timer_icr; |
347 | 348 | ||
348 | /*390*/ const | 349 | /*390*/ const |
349 | struct { /* Timer Current Count Register */ | 350 | struct { /* Timer Current Count Register */ |
350 | u32 curr_count; | 351 | u32 curr_count; |
351 | u32 __reserved_2[3]; | 352 | u32 __reserved_2[3]; |
352 | } timer_ccr; | 353 | } timer_ccr; |
353 | 354 | ||
354 | /*3A0*/ struct { u32 __reserved[4]; } __reserved_16; | 355 | /*3A0*/ struct { u32 __reserved[4]; } __reserved_16; |
355 | 356 | ||
356 | /*3B0*/ struct { u32 __reserved[4]; } __reserved_17; | 357 | /*3B0*/ struct { u32 __reserved[4]; } __reserved_17; |
357 | 358 | ||
358 | /*3C0*/ struct { u32 __reserved[4]; } __reserved_18; | 359 | /*3C0*/ struct { u32 __reserved[4]; } __reserved_18; |
359 | 360 | ||
360 | /*3D0*/ struct { u32 __reserved[4]; } __reserved_19; | 361 | /*3D0*/ struct { u32 __reserved[4]; } __reserved_19; |
361 | 362 | ||
362 | /*3E0*/ struct { /* Timer Divide Configuration Register */ | 363 | /*3E0*/ struct { /* Timer Divide Configuration Register */ |
363 | u32 divisor : 4, | 364 | u32 divisor : 4, |
364 | __reserved_1 : 28; | 365 | __reserved_1 : 28; |
365 | u32 __reserved_2[3]; | 366 | u32 __reserved_2[3]; |
366 | } timer_dcr; | 367 | } timer_dcr; |
367 | 368 | ||
368 | /*3F0*/ struct { u32 __reserved[4]; } __reserved_20; | 369 | /*3F0*/ struct { u32 __reserved[4]; } __reserved_20; |
369 | 370 | ||
370 | } __attribute__ ((packed)); | 371 | } __attribute__ ((packed)); |
371 | 372 | ||
372 | #undef u32 | 373 | #undef u32 |
373 | 374 | ||
374 | #endif | 375 | #endif |
375 | 376 |
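The apicdef.h hunk above only shifts line numbers; the register-layout structure itself is unchanged. Each local APIC register occupies one 16-byte slot (a 32-bit register plus three reserved u32 words of padding), so the hex offsets noted in the comments fall directly out of the packed layout. A minimal compile-time sanity check of that property, assuming the enclosing packed structure is named struct local_apic as in apicdef.h; this check is illustrative only and not part of the patch:

/* Illustrative only: confirm a few fields of the packed local APIC
 * overlay land on the documented register offsets. */
#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <linux/stddef.h>	/* offsetof() */
#include <asm/apicdef.h>	/* struct local_apic (name assumed) */

static inline void lapic_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct local_apic, lvt_error) != 0x370);
	BUILD_BUG_ON(offsetof(struct local_apic, timer_icr) != 0x380);
	BUILD_BUG_ON(offsetof(struct local_apic, timer_dcr) != 0x3E0);
}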
include/asm-i386/mach-generic/mach_apic.h
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef __ASM_MACH_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define __ASM_MACH_APIC_H |
3 | 3 | ||
4 | #include <asm/genapic.h> | 4 | #include <asm/genapic.h> |
5 | 5 | ||
6 | #define esr_disable (genapic->ESR_DISABLE) | 6 | #define esr_disable (genapic->ESR_DISABLE) |
7 | #define NO_BALANCE_IRQ (genapic->no_balance_irq) | 7 | #define NO_BALANCE_IRQ (genapic->no_balance_irq) |
8 | #define INT_DELIVERY_MODE (genapic->int_delivery_mode) | 8 | #define INT_DELIVERY_MODE (genapic->int_delivery_mode) |
9 | #define INT_DEST_MODE (genapic->int_dest_mode) | 9 | #define INT_DEST_MODE (genapic->int_dest_mode) |
10 | #undef APIC_DEST_LOGICAL | 10 | #undef APIC_DEST_LOGICAL |
11 | #define APIC_DEST_LOGICAL (genapic->apic_destination_logical) | 11 | #define APIC_DEST_LOGICAL (genapic->apic_destination_logical) |
12 | #define TARGET_CPUS (genapic->target_cpus()) | 12 | #define TARGET_CPUS (genapic->target_cpus()) |
13 | #define apic_id_registered (genapic->apic_id_registered) | 13 | #define apic_id_registered (genapic->apic_id_registered) |
14 | #define init_apic_ldr (genapic->init_apic_ldr) | 14 | #define init_apic_ldr (genapic->init_apic_ldr) |
15 | #define ioapic_phys_id_map (genapic->ioapic_phys_id_map) | 15 | #define ioapic_phys_id_map (genapic->ioapic_phys_id_map) |
16 | #define clustered_apic_check (genapic->clustered_apic_check) | 16 | #define clustered_apic_check (genapic->clustered_apic_check) |
17 | #define multi_timer_check (genapic->multi_timer_check) | 17 | #define multi_timer_check (genapic->multi_timer_check) |
18 | #define apicid_to_node (genapic->apicid_to_node) | 18 | #define apicid_to_node (genapic->apicid_to_node) |
19 | #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) | 19 | #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) |
20 | #define cpu_present_to_apicid (genapic->cpu_present_to_apicid) | 20 | #define cpu_present_to_apicid (genapic->cpu_present_to_apicid) |
21 | #define apicid_to_cpu_present (genapic->apicid_to_cpu_present) | 21 | #define apicid_to_cpu_present (genapic->apicid_to_cpu_present) |
22 | #define mpc_apic_id (genapic->mpc_apic_id) | 22 | #define mpc_apic_id (genapic->mpc_apic_id) |
23 | #define setup_portio_remap (genapic->setup_portio_remap) | 23 | #define setup_portio_remap (genapic->setup_portio_remap) |
24 | #define check_apicid_present (genapic->check_apicid_present) | 24 | #define check_apicid_present (genapic->check_apicid_present) |
25 | #define check_phys_apicid_present (genapic->check_phys_apicid_present) | 25 | #define check_phys_apicid_present (genapic->check_phys_apicid_present) |
26 | #define check_apicid_used (genapic->check_apicid_used) | 26 | #define check_apicid_used (genapic->check_apicid_used) |
27 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) | 27 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) |
28 | #define enable_apic_mode (genapic->enable_apic_mode) | 28 | #define enable_apic_mode (genapic->enable_apic_mode) |
29 | #define phys_pkg_id (genapic->phys_pkg_id) | 29 | #define phys_pkg_id (genapic->phys_pkg_id) |
30 | 30 | ||
31 | extern void generic_bigsmp_probe(void); | ||
32 | |||
31 | #endif /* __ASM_MACH_APIC_H */ | 33 | #endif /* __ASM_MACH_APIC_H */ |
32 | 34 |
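Every APIC operation in the generic subarchitecture is a macro that dispatches through the run-time genapic pointer, so switching APIC drivers after CPU discovery only requires re-pointing genapic. The newly exported generic_bigsmp_probe() is the hook that performs that switch; its definition lives in the mach-generic probe code changed elsewhere in this commit. A minimal sketch of such a probe, assuming the mach-generic driver instances are named apic_default and apic_bigsmp, and not the patch's actual hunk:

/* Illustrative sketch, not the commit's actual probe implementation. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/genapic.h>	/* struct genapic, genapic pointer */
#include <asm/mpspec.h>		/* def_to_bigsmp */

extern struct genapic apic_default, apic_bigsmp;	/* names assumed */

void __init generic_bigsmp_probe(void)
{
	/* Switch from the default driver to bigsmp only when the CPU
	 * enumeration code has flagged more than 8 logical CPUs. */
	if (def_to_bigsmp && genapic == &apic_default) {
		genapic = &apic_bigsmp;
		printk(KERN_INFO "Switched APIC driver to %s\n",
		       genapic->name);
	}
}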
include/asm-i386/mpspec.h
1 | #ifndef __ASM_MPSPEC_H | 1 | #ifndef __ASM_MPSPEC_H |
2 | #define __ASM_MPSPEC_H | 2 | #define __ASM_MPSPEC_H |
3 | 3 | ||
4 | #include <linux/cpumask.h> | 4 | #include <linux/cpumask.h> |
5 | #include <asm/mpspec_def.h> | 5 | #include <asm/mpspec_def.h> |
6 | #include <mach_mpspec.h> | 6 | #include <mach_mpspec.h> |
7 | 7 | ||
8 | extern int mp_bus_id_to_type [MAX_MP_BUSSES]; | 8 | extern int mp_bus_id_to_type [MAX_MP_BUSSES]; |
9 | extern int mp_bus_id_to_node [MAX_MP_BUSSES]; | 9 | extern int mp_bus_id_to_node [MAX_MP_BUSSES]; |
10 | extern int mp_bus_id_to_local [MAX_MP_BUSSES]; | 10 | extern int mp_bus_id_to_local [MAX_MP_BUSSES]; |
11 | extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; | 11 | extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; |
12 | extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; | 12 | extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; |
13 | 13 | ||
14 | extern unsigned int def_to_bigsmp; | ||
14 | extern unsigned int boot_cpu_physical_apicid; | 15 | extern unsigned int boot_cpu_physical_apicid; |
15 | extern int smp_found_config; | 16 | extern int smp_found_config; |
16 | extern void find_smp_config (void); | 17 | extern void find_smp_config (void); |
17 | extern void get_smp_config (void); | 18 | extern void get_smp_config (void); |
18 | extern int nr_ioapics; | 19 | extern int nr_ioapics; |
19 | extern int apic_version [MAX_APICS]; | 20 | extern int apic_version [MAX_APICS]; |
20 | extern int mp_bus_id_to_type [MAX_MP_BUSSES]; | 21 | extern int mp_bus_id_to_type [MAX_MP_BUSSES]; |
21 | extern int mp_irq_entries; | 22 | extern int mp_irq_entries; |
22 | extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; | 23 | extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; |
23 | extern int mpc_default_type; | 24 | extern int mpc_default_type; |
24 | extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; | 25 | extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; |
25 | extern unsigned long mp_lapic_addr; | 26 | extern unsigned long mp_lapic_addr; |
26 | extern int pic_mode; | 27 | extern int pic_mode; |
27 | extern int using_apic_timer; | 28 | extern int using_apic_timer; |
28 | 29 | ||
29 | #ifdef CONFIG_ACPI_BOOT | 30 | #ifdef CONFIG_ACPI_BOOT |
30 | extern void mp_register_lapic (u8 id, u8 enabled); | 31 | extern void mp_register_lapic (u8 id, u8 enabled); |
31 | extern void mp_register_lapic_address (u64 address); | 32 | extern void mp_register_lapic_address (u64 address); |
32 | extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); | 33 | extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); |
33 | extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); | 34 | extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); |
34 | extern void mp_config_acpi_legacy_irqs (void); | 35 | extern void mp_config_acpi_legacy_irqs (void); |
35 | extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low); | 36 | extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low); |
36 | #endif /*CONFIG_ACPI_BOOT*/ | 37 | #endif /*CONFIG_ACPI_BOOT*/ |
37 | 38 | ||
38 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) | 39 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) |
39 | 40 | ||
40 | struct physid_mask | 41 | struct physid_mask |
41 | { | 42 | { |
42 | unsigned long mask[PHYSID_ARRAY_SIZE]; | 43 | unsigned long mask[PHYSID_ARRAY_SIZE]; |
43 | }; | 44 | }; |
44 | 45 | ||
45 | typedef struct physid_mask physid_mask_t; | 46 | typedef struct physid_mask physid_mask_t; |
46 | 47 | ||
47 | #define physid_set(physid, map) set_bit(physid, (map).mask) | 48 | #define physid_set(physid, map) set_bit(physid, (map).mask) |
48 | #define physid_clear(physid, map) clear_bit(physid, (map).mask) | 49 | #define physid_clear(physid, map) clear_bit(physid, (map).mask) |
49 | #define physid_isset(physid, map) test_bit(physid, (map).mask) | 50 | #define physid_isset(physid, map) test_bit(physid, (map).mask) |
50 | #define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) | 51 | #define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) |
51 | 52 | ||
52 | #define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | 53 | #define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) |
53 | #define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | 54 | #define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) |
54 | #define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) | 55 | #define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) |
55 | #define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS) | 56 | #define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS) |
56 | #define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) | 57 | #define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) |
57 | #define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) | 58 | #define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) |
58 | #define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) | 59 | #define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) |
59 | #define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) | 60 | #define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) |
60 | #define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) | 61 | #define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) |
61 | #define physids_coerce(map) ((map).mask[0]) | 62 | #define physids_coerce(map) ((map).mask[0]) |
62 | 63 | ||
63 | #define physids_promote(physids) \ | 64 | #define physids_promote(physids) \ |
64 | ({ \ | 65 | ({ \ |
65 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | 66 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ |
66 | __physid_mask.mask[0] = physids; \ | 67 | __physid_mask.mask[0] = physids; \ |
67 | __physid_mask; \ | 68 | __physid_mask; \ |
68 | }) | 69 | }) |
69 | 70 | ||
70 | #define physid_mask_of_physid(physid) \ | 71 | #define physid_mask_of_physid(physid) \ |
71 | ({ \ | 72 | ({ \ |
72 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ | 73 | physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ |
73 | physid_set(physid, __physid_mask); \ | 74 | physid_set(physid, __physid_mask); \ |
74 | __physid_mask; \ | 75 | __physid_mask; \ |
75 | }) | 76 | }) |
76 | 77 | ||
77 | #define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } | 78 | #define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } |
78 | #define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } | 79 | #define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} } |
79 | 80 | ||
80 | extern physid_mask_t phys_cpu_present_map; | 81 | extern physid_mask_t phys_cpu_present_map; |
81 | 82 | ||
82 | #endif | 83 | #endif |
83 | 84 | ||
84 | 85 |
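mpspec.h exports the new def_to_bigsmp flag so the CPU-enumeration paths (MP-table and ACPI LAPIC parsing) can record that more than 8 logical CPUs were found, for generic_bigsmp_probe() to act on later. A hypothetical sketch of that check, using the conditions named in the commit message (more than 8 logical CPUs, Intel processor, xAPIC support); the helper name and parameters are illustrative and not taken from the patch:

/* Illustrative sketch: the real hunks live in the MP-table/ACPI CPU
 * enumeration code changed elsewhere in this commit. */
#include <asm/mpspec.h>		/* def_to_bigsmp */
#include <asm/processor.h>	/* boot_cpu_data, X86_VENDOR_INTEL */

static void note_cpu_for_bigsmp(int nr_cpus, int lapic_version)
{
	/* Mirror the commit message's conditions: the default driver
	 * handles at most 8 logical CPUs, so fall back to bigsmp when
	 * more are present on an Intel part with xAPIC support
	 * (integrated LAPIC version 0x14 or higher). */
	if (nr_cpus > 8 &&
	    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    lapic_version >= 0x14)
		def_to_bigsmp = 1;
}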