Commit 1d487624fcc17a40aa67acaa9e8f3815fb7cd0f0
Committed by: Ingo Molnar
Parent: d47cc0db8f
Exists in: master and 7 other branches
x86, SMEP: Fix section mismatch warnings

Fix these kernel compilation warnings:

  WARNING: arch/x86/built-in.o(.cpuinit.text+0x1e07): Section mismatch ...
  WARNING: arch/x86/built-in.o(.cpuinit.text+0x1b10): Section mismatch ...

introduced by:

  de5397ad5b9a: x86, cpu: Enable/disable Supervisor Mode Execution Protection

Change disable_smep from __initdata to __cpuinitdata.
Change setup_smep() from __init to __cpuinit.

Reported-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Cc: Asit K Mallick <asit.k.mallick@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1305930797-11409-1-git-send-email-fenghua.yu@intel.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
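For readers unfamiliar with the section annotations, a minimal, hypothetical sketch (not the kernel's actual SMEP code) of why modpost emits the warning and why matching the annotations silences it: data placed in .init.data with __initdata is discarded once boot finishes, while a __cpuinit function in .cpuinit.text can still run later when a CPU is hotplugged, so a reference from the latter to the former is flagged as a section mismatch.

    /*
     * Hypothetical sketch of the annotation pattern -- not the real
     * SMEP code from this commit. The symbol names are made up.
     */
    #include <linux/init.h>

    static int example_flag __initdata;        /* lives in .init.data     */
    static int example_flag_ok __cpuinitdata;  /* lives in .cpuinit.data  */

    static void __cpuinit example_setup(void)  /* lives in .cpuinit.text  */
    {
            if (example_flag)      /* mismatch: .cpuinit.text -> .init.data */
                    return;
            if (example_flag_ok)   /* OK: both share the CPU-init lifetime  */
                    return;
    }

This is exactly the shape of the fix below: disable_smep is read by setup_smep(), which must remain callable for CPU bring-up after boot, so both symbols move from the boot-only __init*/__initdata class to the __cpuinit*/__cpuinitdata class.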
Showing 1 changed file with 2 additions and 2 deletions
arch/x86/kernel/cpu/common.c
1 | #include <linux/bootmem.h> | 1 | #include <linux/bootmem.h> |
2 | #include <linux/linkage.h> | 2 | #include <linux/linkage.h> |
3 | #include <linux/bitops.h> | 3 | #include <linux/bitops.h> |
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
6 | #include <linux/percpu.h> | 6 | #include <linux/percpu.h> |
7 | #include <linux/string.h> | 7 | #include <linux/string.h> |
8 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
9 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/kgdb.h> | 11 | #include <linux/kgdb.h> |
12 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
14 | 14 | ||
15 | #include <asm/stackprotector.h> | 15 | #include <asm/stackprotector.h> |
16 | #include <asm/perf_event.h> | 16 | #include <asm/perf_event.h> |
17 | #include <asm/mmu_context.h> | 17 | #include <asm/mmu_context.h> |
18 | #include <asm/hypervisor.h> | 18 | #include <asm/hypervisor.h> |
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | #include <asm/sections.h> | 20 | #include <asm/sections.h> |
21 | #include <linux/topology.h> | 21 | #include <linux/topology.h> |
22 | #include <linux/cpumask.h> | 22 | #include <linux/cpumask.h> |
23 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
24 | #include <asm/atomic.h> | 24 | #include <asm/atomic.h> |
25 | #include <asm/proto.h> | 25 | #include <asm/proto.h> |
26 | #include <asm/setup.h> | 26 | #include <asm/setup.h> |
27 | #include <asm/apic.h> | 27 | #include <asm/apic.h> |
28 | #include <asm/desc.h> | 28 | #include <asm/desc.h> |
29 | #include <asm/i387.h> | 29 | #include <asm/i387.h> |
30 | #include <asm/mtrr.h> | 30 | #include <asm/mtrr.h> |
31 | #include <linux/numa.h> | 31 | #include <linux/numa.h> |
32 | #include <asm/asm.h> | 32 | #include <asm/asm.h> |
33 | #include <asm/cpu.h> | 33 | #include <asm/cpu.h> |
34 | #include <asm/mce.h> | 34 | #include <asm/mce.h> |
35 | #include <asm/msr.h> | 35 | #include <asm/msr.h> |
36 | #include <asm/pat.h> | 36 | #include <asm/pat.h> |
37 | 37 | ||
38 | #ifdef CONFIG_X86_LOCAL_APIC | 38 | #ifdef CONFIG_X86_LOCAL_APIC |
39 | #include <asm/uv/uv.h> | 39 | #include <asm/uv/uv.h> |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | #include "cpu.h" | 42 | #include "cpu.h" |
43 | 43 | ||
44 | /* all of these masks are initialized in setup_cpu_local_masks() */ | 44 | /* all of these masks are initialized in setup_cpu_local_masks() */ |
45 | cpumask_var_t cpu_initialized_mask; | 45 | cpumask_var_t cpu_initialized_mask; |
46 | cpumask_var_t cpu_callout_mask; | 46 | cpumask_var_t cpu_callout_mask; |
47 | cpumask_var_t cpu_callin_mask; | 47 | cpumask_var_t cpu_callin_mask; |
48 | 48 | ||
49 | /* representing cpus for which sibling maps can be computed */ | 49 | /* representing cpus for which sibling maps can be computed */ |
50 | cpumask_var_t cpu_sibling_setup_mask; | 50 | cpumask_var_t cpu_sibling_setup_mask; |
51 | 51 | ||
52 | /* correctly size the local cpu masks */ | 52 | /* correctly size the local cpu masks */ |
53 | void __init setup_cpu_local_masks(void) | 53 | void __init setup_cpu_local_masks(void) |
54 | { | 54 | { |
55 | alloc_bootmem_cpumask_var(&cpu_initialized_mask); | 55 | alloc_bootmem_cpumask_var(&cpu_initialized_mask); |
56 | alloc_bootmem_cpumask_var(&cpu_callin_mask); | 56 | alloc_bootmem_cpumask_var(&cpu_callin_mask); |
57 | alloc_bootmem_cpumask_var(&cpu_callout_mask); | 57 | alloc_bootmem_cpumask_var(&cpu_callout_mask); |
58 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); | 58 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); |
59 | } | 59 | } |
60 | 60 | ||
61 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | 61 | static void __cpuinit default_init(struct cpuinfo_x86 *c) |
62 | { | 62 | { |
63 | #ifdef CONFIG_X86_64 | 63 | #ifdef CONFIG_X86_64 |
64 | cpu_detect_cache_sizes(c); | 64 | cpu_detect_cache_sizes(c); |
65 | #else | 65 | #else |
66 | /* Not much we can do here... */ | 66 | /* Not much we can do here... */ |
67 | /* Check if at least it has cpuid */ | 67 | /* Check if at least it has cpuid */ |
68 | if (c->cpuid_level == -1) { | 68 | if (c->cpuid_level == -1) { |
69 | /* No cpuid. It must be an ancient CPU */ | 69 | /* No cpuid. It must be an ancient CPU */ |
70 | if (c->x86 == 4) | 70 | if (c->x86 == 4) |
71 | strcpy(c->x86_model_id, "486"); | 71 | strcpy(c->x86_model_id, "486"); |
72 | else if (c->x86 == 3) | 72 | else if (c->x86 == 3) |
73 | strcpy(c->x86_model_id, "386"); | 73 | strcpy(c->x86_model_id, "386"); |
74 | } | 74 | } |
75 | #endif | 75 | #endif |
76 | } | 76 | } |
77 | 77 | ||
78 | static const struct cpu_dev __cpuinitconst default_cpu = { | 78 | static const struct cpu_dev __cpuinitconst default_cpu = { |
79 | .c_init = default_init, | 79 | .c_init = default_init, |
80 | .c_vendor = "Unknown", | 80 | .c_vendor = "Unknown", |
81 | .c_x86_vendor = X86_VENDOR_UNKNOWN, | 81 | .c_x86_vendor = X86_VENDOR_UNKNOWN, |
82 | }; | 82 | }; |
83 | 83 | ||
84 | static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; | 84 | static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; |
85 | 85 | ||
86 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | 86 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { |
87 | #ifdef CONFIG_X86_64 | 87 | #ifdef CONFIG_X86_64 |
88 | /* | 88 | /* |
89 | * We need valid kernel segments for data and code in long mode too | 89 | * We need valid kernel segments for data and code in long mode too |
90 | * IRET will check the segment types kkeil 2000/10/28 | 90 | * IRET will check the segment types kkeil 2000/10/28 |
91 | * Also sysret mandates a special GDT layout | 91 | * Also sysret mandates a special GDT layout |
92 | * | 92 | * |
93 | * TLS descriptors are currently at a different place compared to i386. | 93 | * TLS descriptors are currently at a different place compared to i386. |
94 | * Hopefully nobody expects them at a fixed place (Wine?) | 94 | * Hopefully nobody expects them at a fixed place (Wine?) |
95 | */ | 95 | */ |
96 | [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), | 96 | [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), |
97 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), | 97 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), |
98 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), | 98 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), |
99 | [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), | 99 | [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), |
100 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), | 100 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), |
101 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), | 101 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), |
102 | #else | 102 | #else |
103 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), | 103 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), |
104 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), | 104 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
105 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), | 105 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), |
106 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), | 106 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), |
107 | /* | 107 | /* |
108 | * Segments used for calling PnP BIOS have byte granularity. | 108 | * Segments used for calling PnP BIOS have byte granularity. |
109 | * They code segments and data segments have fixed 64k limits, | 109 | * They code segments and data segments have fixed 64k limits, |
110 | * the transfer segment sizes are set at run time. | 110 | * the transfer segment sizes are set at run time. |
111 | */ | 111 | */ |
112 | /* 32-bit code */ | 112 | /* 32-bit code */ |
113 | [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), | 113 | [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
114 | /* 16-bit code */ | 114 | /* 16-bit code */ |
115 | [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), | 115 | [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
116 | /* 16-bit data */ | 116 | /* 16-bit data */ |
117 | [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), | 117 | [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), |
118 | /* 16-bit data */ | 118 | /* 16-bit data */ |
119 | [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), | 119 | [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), |
120 | /* 16-bit data */ | 120 | /* 16-bit data */ |
121 | [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), | 121 | [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), |
122 | /* | 122 | /* |
123 | * The APM segments have byte granularity and their bases | 123 | * The APM segments have byte granularity and their bases |
124 | * are set at run time. All have 64k limits. | 124 | * are set at run time. All have 64k limits. |
125 | */ | 125 | */ |
126 | /* 32-bit code */ | 126 | /* 32-bit code */ |
127 | [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), | 127 | [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
128 | /* 16-bit code */ | 128 | /* 16-bit code */ |
129 | [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), | 129 | [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
130 | /* data */ | 130 | /* data */ |
131 | [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), | 131 | [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), |
132 | 132 | ||
133 | [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), | 133 | [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
134 | [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), | 134 | [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
135 | GDT_STACK_CANARY_INIT | 135 | GDT_STACK_CANARY_INIT |
136 | #endif | 136 | #endif |
137 | } }; | 137 | } }; |
138 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); | 138 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); |
139 | 139 | ||
140 | static int __init x86_xsave_setup(char *s) | 140 | static int __init x86_xsave_setup(char *s) |
141 | { | 141 | { |
142 | setup_clear_cpu_cap(X86_FEATURE_XSAVE); | 142 | setup_clear_cpu_cap(X86_FEATURE_XSAVE); |
143 | setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); | 143 | setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); |
144 | return 1; | 144 | return 1; |
145 | } | 145 | } |
146 | __setup("noxsave", x86_xsave_setup); | 146 | __setup("noxsave", x86_xsave_setup); |
147 | 147 | ||
148 | static int __init x86_xsaveopt_setup(char *s) | 148 | static int __init x86_xsaveopt_setup(char *s) |
149 | { | 149 | { |
150 | setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); | 150 | setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); |
151 | return 1; | 151 | return 1; |
152 | } | 152 | } |
153 | __setup("noxsaveopt", x86_xsaveopt_setup); | 153 | __setup("noxsaveopt", x86_xsaveopt_setup); |
154 | 154 | ||
155 | #ifdef CONFIG_X86_32 | 155 | #ifdef CONFIG_X86_32 |
156 | static int cachesize_override __cpuinitdata = -1; | 156 | static int cachesize_override __cpuinitdata = -1; |
157 | static int disable_x86_serial_nr __cpuinitdata = 1; | 157 | static int disable_x86_serial_nr __cpuinitdata = 1; |
158 | 158 | ||
159 | static int __init cachesize_setup(char *str) | 159 | static int __init cachesize_setup(char *str) |
160 | { | 160 | { |
161 | get_option(&str, &cachesize_override); | 161 | get_option(&str, &cachesize_override); |
162 | return 1; | 162 | return 1; |
163 | } | 163 | } |
164 | __setup("cachesize=", cachesize_setup); | 164 | __setup("cachesize=", cachesize_setup); |
165 | 165 | ||
166 | static int __init x86_fxsr_setup(char *s) | 166 | static int __init x86_fxsr_setup(char *s) |
167 | { | 167 | { |
168 | setup_clear_cpu_cap(X86_FEATURE_FXSR); | 168 | setup_clear_cpu_cap(X86_FEATURE_FXSR); |
169 | setup_clear_cpu_cap(X86_FEATURE_XMM); | 169 | setup_clear_cpu_cap(X86_FEATURE_XMM); |
170 | return 1; | 170 | return 1; |
171 | } | 171 | } |
172 | __setup("nofxsr", x86_fxsr_setup); | 172 | __setup("nofxsr", x86_fxsr_setup); |
173 | 173 | ||
174 | static int __init x86_sep_setup(char *s) | 174 | static int __init x86_sep_setup(char *s) |
175 | { | 175 | { |
176 | setup_clear_cpu_cap(X86_FEATURE_SEP); | 176 | setup_clear_cpu_cap(X86_FEATURE_SEP); |
177 | return 1; | 177 | return 1; |
178 | } | 178 | } |
179 | __setup("nosep", x86_sep_setup); | 179 | __setup("nosep", x86_sep_setup); |
180 | 180 | ||
181 | /* Standard macro to see if a specific flag is changeable */ | 181 | /* Standard macro to see if a specific flag is changeable */ |
182 | static inline int flag_is_changeable_p(u32 flag) | 182 | static inline int flag_is_changeable_p(u32 flag) |
183 | { | 183 | { |
184 | u32 f1, f2; | 184 | u32 f1, f2; |
185 | 185 | ||
186 | /* | 186 | /* |
187 | * Cyrix and IDT cpus allow disabling of CPUID | 187 | * Cyrix and IDT cpus allow disabling of CPUID |
188 | * so the code below may return different results | 188 | * so the code below may return different results |
189 | * when it is executed before and after enabling | 189 | * when it is executed before and after enabling |
190 | * the CPUID. Add "volatile" to not allow gcc to | 190 | * the CPUID. Add "volatile" to not allow gcc to |
191 | * optimize the subsequent calls to this function. | 191 | * optimize the subsequent calls to this function. |
192 | */ | 192 | */ |
193 | asm volatile ("pushfl \n\t" | 193 | asm volatile ("pushfl \n\t" |
194 | "pushfl \n\t" | 194 | "pushfl \n\t" |
195 | "popl %0 \n\t" | 195 | "popl %0 \n\t" |
196 | "movl %0, %1 \n\t" | 196 | "movl %0, %1 \n\t" |
197 | "xorl %2, %0 \n\t" | 197 | "xorl %2, %0 \n\t" |
198 | "pushl %0 \n\t" | 198 | "pushl %0 \n\t" |
199 | "popfl \n\t" | 199 | "popfl \n\t" |
200 | "pushfl \n\t" | 200 | "pushfl \n\t" |
201 | "popl %0 \n\t" | 201 | "popl %0 \n\t" |
202 | "popfl \n\t" | 202 | "popfl \n\t" |
203 | 203 | ||
204 | : "=&r" (f1), "=&r" (f2) | 204 | : "=&r" (f1), "=&r" (f2) |
205 | : "ir" (flag)); | 205 | : "ir" (flag)); |
206 | 206 | ||
207 | return ((f1^f2) & flag) != 0; | 207 | return ((f1^f2) & flag) != 0; |
208 | } | 208 | } |
209 | 209 | ||
210 | /* Probe for the CPUID instruction */ | 210 | /* Probe for the CPUID instruction */ |
211 | static int __cpuinit have_cpuid_p(void) | 211 | static int __cpuinit have_cpuid_p(void) |
212 | { | 212 | { |
213 | return flag_is_changeable_p(X86_EFLAGS_ID); | 213 | return flag_is_changeable_p(X86_EFLAGS_ID); |
214 | } | 214 | } |
215 | 215 | ||
216 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 216 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
217 | { | 217 | { |
218 | unsigned long lo, hi; | 218 | unsigned long lo, hi; |
219 | 219 | ||
220 | if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr) | 220 | if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr) |
221 | return; | 221 | return; |
222 | 222 | ||
223 | /* Disable processor serial number: */ | 223 | /* Disable processor serial number: */ |
224 | 224 | ||
225 | rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | 225 | rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); |
226 | lo |= 0x200000; | 226 | lo |= 0x200000; |
227 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); | 227 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); |
228 | 228 | ||
229 | printk(KERN_NOTICE "CPU serial number disabled.\n"); | 229 | printk(KERN_NOTICE "CPU serial number disabled.\n"); |
230 | clear_cpu_cap(c, X86_FEATURE_PN); | 230 | clear_cpu_cap(c, X86_FEATURE_PN); |
231 | 231 | ||
232 | /* Disabling the serial number may affect the cpuid level */ | 232 | /* Disabling the serial number may affect the cpuid level */ |
233 | c->cpuid_level = cpuid_eax(0); | 233 | c->cpuid_level = cpuid_eax(0); |
234 | } | 234 | } |
235 | 235 | ||
236 | static int __init x86_serial_nr_setup(char *s) | 236 | static int __init x86_serial_nr_setup(char *s) |
237 | { | 237 | { |
238 | disable_x86_serial_nr = 0; | 238 | disable_x86_serial_nr = 0; |
239 | return 1; | 239 | return 1; |
240 | } | 240 | } |
241 | __setup("serialnumber", x86_serial_nr_setup); | 241 | __setup("serialnumber", x86_serial_nr_setup); |
242 | #else | 242 | #else |
243 | static inline int flag_is_changeable_p(u32 flag) | 243 | static inline int flag_is_changeable_p(u32 flag) |
244 | { | 244 | { |
245 | return 1; | 245 | return 1; |
246 | } | 246 | } |
247 | /* Probe for the CPUID instruction */ | 247 | /* Probe for the CPUID instruction */ |
248 | static inline int have_cpuid_p(void) | 248 | static inline int have_cpuid_p(void) |
249 | { | 249 | { |
250 | return 1; | 250 | return 1; |
251 | } | 251 | } |
252 | static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 252 | static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
253 | { | 253 | { |
254 | } | 254 | } |
255 | #endif | 255 | #endif |
256 | 256 | ||
257 | static int disable_smep __initdata; | 257 | static int disable_smep __cpuinitdata; |
258 | static __init int setup_disable_smep(char *arg) | 258 | static __init int setup_disable_smep(char *arg) |
259 | { | 259 | { |
260 | disable_smep = 1; | 260 | disable_smep = 1; |
261 | return 1; | 261 | return 1; |
262 | } | 262 | } |
263 | __setup("nosmep", setup_disable_smep); | 263 | __setup("nosmep", setup_disable_smep); |
264 | 264 | ||
265 | static __init void setup_smep(struct cpuinfo_x86 *c) | 265 | static __cpuinit void setup_smep(struct cpuinfo_x86 *c) |
266 | { | 266 | { |
267 | if (cpu_has(c, X86_FEATURE_SMEP)) { | 267 | if (cpu_has(c, X86_FEATURE_SMEP)) { |
268 | if (unlikely(disable_smep)) { | 268 | if (unlikely(disable_smep)) { |
269 | setup_clear_cpu_cap(X86_FEATURE_SMEP); | 269 | setup_clear_cpu_cap(X86_FEATURE_SMEP); |
270 | clear_in_cr4(X86_CR4_SMEP); | 270 | clear_in_cr4(X86_CR4_SMEP); |
271 | } else | 271 | } else |
272 | set_in_cr4(X86_CR4_SMEP); | 272 | set_in_cr4(X86_CR4_SMEP); |
273 | } | 273 | } |
274 | } | 274 | } |
275 | 275 | ||
276 | /* | 276 | /* |
277 | * Some CPU features depend on higher CPUID levels, which may not always | 277 | * Some CPU features depend on higher CPUID levels, which may not always |
278 | * be available due to CPUID level capping or broken virtualization | 278 | * be available due to CPUID level capping or broken virtualization |
279 | * software. Add those features to this table to auto-disable them. | 279 | * software. Add those features to this table to auto-disable them. |
280 | */ | 280 | */ |
281 | struct cpuid_dependent_feature { | 281 | struct cpuid_dependent_feature { |
282 | u32 feature; | 282 | u32 feature; |
283 | u32 level; | 283 | u32 level; |
284 | }; | 284 | }; |
285 | 285 | ||
286 | static const struct cpuid_dependent_feature __cpuinitconst | 286 | static const struct cpuid_dependent_feature __cpuinitconst |
287 | cpuid_dependent_features[] = { | 287 | cpuid_dependent_features[] = { |
288 | { X86_FEATURE_MWAIT, 0x00000005 }, | 288 | { X86_FEATURE_MWAIT, 0x00000005 }, |
289 | { X86_FEATURE_DCA, 0x00000009 }, | 289 | { X86_FEATURE_DCA, 0x00000009 }, |
290 | { X86_FEATURE_XSAVE, 0x0000000d }, | 290 | { X86_FEATURE_XSAVE, 0x0000000d }, |
291 | { 0, 0 } | 291 | { 0, 0 } |
292 | }; | 292 | }; |
293 | 293 | ||
294 | static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) | 294 | static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) |
295 | { | 295 | { |
296 | const struct cpuid_dependent_feature *df; | 296 | const struct cpuid_dependent_feature *df; |
297 | 297 | ||
298 | for (df = cpuid_dependent_features; df->feature; df++) { | 298 | for (df = cpuid_dependent_features; df->feature; df++) { |
299 | 299 | ||
300 | if (!cpu_has(c, df->feature)) | 300 | if (!cpu_has(c, df->feature)) |
301 | continue; | 301 | continue; |
302 | /* | 302 | /* |
303 | * Note: cpuid_level is set to -1 if unavailable, but | 303 | * Note: cpuid_level is set to -1 if unavailable, but |
304 | * extended_extended_level is set to 0 if unavailable | 304 | * extended_extended_level is set to 0 if unavailable |
305 | * and the legitimate extended levels are all negative | 305 | * and the legitimate extended levels are all negative |
306 | * when signed; hence the weird messing around with | 306 | * when signed; hence the weird messing around with |
307 | * signs here... | 307 | * signs here... |
308 | */ | 308 | */ |
309 | if (!((s32)df->level < 0 ? | 309 | if (!((s32)df->level < 0 ? |
310 | (u32)df->level > (u32)c->extended_cpuid_level : | 310 | (u32)df->level > (u32)c->extended_cpuid_level : |
311 | (s32)df->level > (s32)c->cpuid_level)) | 311 | (s32)df->level > (s32)c->cpuid_level)) |
312 | continue; | 312 | continue; |
313 | 313 | ||
314 | clear_cpu_cap(c, df->feature); | 314 | clear_cpu_cap(c, df->feature); |
315 | if (!warn) | 315 | if (!warn) |
316 | continue; | 316 | continue; |
317 | 317 | ||
318 | printk(KERN_WARNING | 318 | printk(KERN_WARNING |
319 | "CPU: CPU feature %s disabled, no CPUID level 0x%x\n", | 319 | "CPU: CPU feature %s disabled, no CPUID level 0x%x\n", |
320 | x86_cap_flags[df->feature], df->level); | 320 | x86_cap_flags[df->feature], df->level); |
321 | } | 321 | } |
322 | } | 322 | } |
323 | 323 | ||
324 | /* | 324 | /* |
325 | * Naming convention should be: <Name> [(<Codename>)] | 325 | * Naming convention should be: <Name> [(<Codename>)] |
326 | * This table only is used unless init_<vendor>() below doesn't set it; | 326 | * This table only is used unless init_<vendor>() below doesn't set it; |
327 | * in particular, if CPUID levels 0x80000002..4 are supported, this | 327 | * in particular, if CPUID levels 0x80000002..4 are supported, this |
328 | * isn't used | 328 | * isn't used |
329 | */ | 329 | */ |
330 | 330 | ||
331 | /* Look up CPU names by table lookup. */ | 331 | /* Look up CPU names by table lookup. */ |
332 | static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) | 332 | static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) |
333 | { | 333 | { |
334 | const struct cpu_model_info *info; | 334 | const struct cpu_model_info *info; |
335 | 335 | ||
336 | if (c->x86_model >= 16) | 336 | if (c->x86_model >= 16) |
337 | return NULL; /* Range check */ | 337 | return NULL; /* Range check */ |
338 | 338 | ||
339 | if (!this_cpu) | 339 | if (!this_cpu) |
340 | return NULL; | 340 | return NULL; |
341 | 341 | ||
342 | info = this_cpu->c_models; | 342 | info = this_cpu->c_models; |
343 | 343 | ||
344 | while (info && info->family) { | 344 | while (info && info->family) { |
345 | if (info->family == c->x86) | 345 | if (info->family == c->x86) |
346 | return info->model_names[c->x86_model]; | 346 | return info->model_names[c->x86_model]; |
347 | info++; | 347 | info++; |
348 | } | 348 | } |
349 | return NULL; /* Not found */ | 349 | return NULL; /* Not found */ |
350 | } | 350 | } |
351 | 351 | ||
352 | __u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata; | 352 | __u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata; |
353 | __u32 cpu_caps_set[NCAPINTS] __cpuinitdata; | 353 | __u32 cpu_caps_set[NCAPINTS] __cpuinitdata; |
354 | 354 | ||
355 | void load_percpu_segment(int cpu) | 355 | void load_percpu_segment(int cpu) |
356 | { | 356 | { |
357 | #ifdef CONFIG_X86_32 | 357 | #ifdef CONFIG_X86_32 |
358 | loadsegment(fs, __KERNEL_PERCPU); | 358 | loadsegment(fs, __KERNEL_PERCPU); |
359 | #else | 359 | #else |
360 | loadsegment(gs, 0); | 360 | loadsegment(gs, 0); |
361 | wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); | 361 | wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); |
362 | #endif | 362 | #endif |
363 | load_stack_canary_segment(); | 363 | load_stack_canary_segment(); |
364 | } | 364 | } |
365 | 365 | ||
366 | /* | 366 | /* |
367 | * Current gdt points %fs at the "master" per-cpu area: after this, | 367 | * Current gdt points %fs at the "master" per-cpu area: after this, |
368 | * it's on the real one. | 368 | * it's on the real one. |
369 | */ | 369 | */ |
370 | void switch_to_new_gdt(int cpu) | 370 | void switch_to_new_gdt(int cpu) |
371 | { | 371 | { |
372 | struct desc_ptr gdt_descr; | 372 | struct desc_ptr gdt_descr; |
373 | 373 | ||
374 | gdt_descr.address = (long)get_cpu_gdt_table(cpu); | 374 | gdt_descr.address = (long)get_cpu_gdt_table(cpu); |
375 | gdt_descr.size = GDT_SIZE - 1; | 375 | gdt_descr.size = GDT_SIZE - 1; |
376 | load_gdt(&gdt_descr); | 376 | load_gdt(&gdt_descr); |
377 | /* Reload the per-cpu base */ | 377 | /* Reload the per-cpu base */ |
378 | 378 | ||
379 | load_percpu_segment(cpu); | 379 | load_percpu_segment(cpu); |
380 | } | 380 | } |
381 | 381 | ||
382 | static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; | 382 | static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; |
383 | 383 | ||
384 | static void __cpuinit get_model_name(struct cpuinfo_x86 *c) | 384 | static void __cpuinit get_model_name(struct cpuinfo_x86 *c) |
385 | { | 385 | { |
386 | unsigned int *v; | 386 | unsigned int *v; |
387 | char *p, *q; | 387 | char *p, *q; |
388 | 388 | ||
389 | if (c->extended_cpuid_level < 0x80000004) | 389 | if (c->extended_cpuid_level < 0x80000004) |
390 | return; | 390 | return; |
391 | 391 | ||
392 | v = (unsigned int *)c->x86_model_id; | 392 | v = (unsigned int *)c->x86_model_id; |
393 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); | 393 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); |
394 | cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); | 394 | cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); |
395 | cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); | 395 | cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); |
396 | c->x86_model_id[48] = 0; | 396 | c->x86_model_id[48] = 0; |
397 | 397 | ||
398 | /* | 398 | /* |
399 | * Intel chips right-justify this string for some dumb reason; | 399 | * Intel chips right-justify this string for some dumb reason; |
400 | * undo that brain damage: | 400 | * undo that brain damage: |
401 | */ | 401 | */ |
402 | p = q = &c->x86_model_id[0]; | 402 | p = q = &c->x86_model_id[0]; |
403 | while (*p == ' ') | 403 | while (*p == ' ') |
404 | p++; | 404 | p++; |
405 | if (p != q) { | 405 | if (p != q) { |
406 | while (*p) | 406 | while (*p) |
407 | *q++ = *p++; | 407 | *q++ = *p++; |
408 | while (q <= &c->x86_model_id[48]) | 408 | while (q <= &c->x86_model_id[48]) |
409 | *q++ = '\0'; /* Zero-pad the rest */ | 409 | *q++ = '\0'; /* Zero-pad the rest */ |
410 | } | 410 | } |
411 | } | 411 | } |
412 | 412 | ||
413 | void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c) | 413 | void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c) |
414 | { | 414 | { |
415 | unsigned int n, dummy, ebx, ecx, edx, l2size; | 415 | unsigned int n, dummy, ebx, ecx, edx, l2size; |
416 | 416 | ||
417 | n = c->extended_cpuid_level; | 417 | n = c->extended_cpuid_level; |
418 | 418 | ||
419 | if (n >= 0x80000005) { | 419 | if (n >= 0x80000005) { |
420 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); | 420 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); |
421 | c->x86_cache_size = (ecx>>24) + (edx>>24); | 421 | c->x86_cache_size = (ecx>>24) + (edx>>24); |
422 | #ifdef CONFIG_X86_64 | 422 | #ifdef CONFIG_X86_64 |
423 | /* On K8 L1 TLB is inclusive, so don't count it */ | 423 | /* On K8 L1 TLB is inclusive, so don't count it */ |
424 | c->x86_tlbsize = 0; | 424 | c->x86_tlbsize = 0; |
425 | #endif | 425 | #endif |
426 | } | 426 | } |
427 | 427 | ||
428 | if (n < 0x80000006) /* Some chips just has a large L1. */ | 428 | if (n < 0x80000006) /* Some chips just has a large L1. */ |
429 | return; | 429 | return; |
430 | 430 | ||
431 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); | 431 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); |
432 | l2size = ecx >> 16; | 432 | l2size = ecx >> 16; |
433 | 433 | ||
434 | #ifdef CONFIG_X86_64 | 434 | #ifdef CONFIG_X86_64 |
435 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); | 435 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); |
436 | #else | 436 | #else |
437 | /* do processor-specific cache resizing */ | 437 | /* do processor-specific cache resizing */ |
438 | if (this_cpu->c_size_cache) | 438 | if (this_cpu->c_size_cache) |
439 | l2size = this_cpu->c_size_cache(c, l2size); | 439 | l2size = this_cpu->c_size_cache(c, l2size); |
440 | 440 | ||
441 | /* Allow user to override all this if necessary. */ | 441 | /* Allow user to override all this if necessary. */ |
442 | if (cachesize_override != -1) | 442 | if (cachesize_override != -1) |
443 | l2size = cachesize_override; | 443 | l2size = cachesize_override; |
444 | 444 | ||
445 | if (l2size == 0) | 445 | if (l2size == 0) |
446 | return; /* Again, no L2 cache is possible */ | 446 | return; /* Again, no L2 cache is possible */ |
447 | #endif | 447 | #endif |
448 | 448 | ||
449 | c->x86_cache_size = l2size; | 449 | c->x86_cache_size = l2size; |
450 | } | 450 | } |
451 | 451 | ||
452 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | 452 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) |
453 | { | 453 | { |
454 | #ifdef CONFIG_X86_HT | 454 | #ifdef CONFIG_X86_HT |
455 | u32 eax, ebx, ecx, edx; | 455 | u32 eax, ebx, ecx, edx; |
456 | int index_msb, core_bits; | 456 | int index_msb, core_bits; |
457 | static bool printed; | 457 | static bool printed; |
458 | 458 | ||
459 | if (!cpu_has(c, X86_FEATURE_HT)) | 459 | if (!cpu_has(c, X86_FEATURE_HT)) |
460 | return; | 460 | return; |
461 | 461 | ||
462 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) | 462 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) |
463 | goto out; | 463 | goto out; |
464 | 464 | ||
465 | if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) | 465 | if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) |
466 | return; | 466 | return; |
467 | 467 | ||
468 | cpuid(1, &eax, &ebx, &ecx, &edx); | 468 | cpuid(1, &eax, &ebx, &ecx, &edx); |
469 | 469 | ||
470 | smp_num_siblings = (ebx & 0xff0000) >> 16; | 470 | smp_num_siblings = (ebx & 0xff0000) >> 16; |
471 | 471 | ||
472 | if (smp_num_siblings == 1) { | 472 | if (smp_num_siblings == 1) { |
473 | printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n"); | 473 | printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n"); |
474 | goto out; | 474 | goto out; |
475 | } | 475 | } |
476 | 476 | ||
477 | if (smp_num_siblings <= 1) | 477 | if (smp_num_siblings <= 1) |
478 | goto out; | 478 | goto out; |
479 | 479 | ||
480 | if (smp_num_siblings > nr_cpu_ids) { | 480 | if (smp_num_siblings > nr_cpu_ids) { |
481 | pr_warning("CPU: Unsupported number of siblings %d", | 481 | pr_warning("CPU: Unsupported number of siblings %d", |
482 | smp_num_siblings); | 482 | smp_num_siblings); |
483 | smp_num_siblings = 1; | 483 | smp_num_siblings = 1; |
484 | return; | 484 | return; |
485 | } | 485 | } |
486 | 486 | ||
487 | index_msb = get_count_order(smp_num_siblings); | 487 | index_msb = get_count_order(smp_num_siblings); |
488 | c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); | 488 | c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); |
489 | 489 | ||
490 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | 490 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; |
491 | 491 | ||
492 | index_msb = get_count_order(smp_num_siblings); | 492 | index_msb = get_count_order(smp_num_siblings); |
493 | 493 | ||
494 | core_bits = get_count_order(c->x86_max_cores); | 494 | core_bits = get_count_order(c->x86_max_cores); |
495 | 495 | ||
496 | c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & | 496 | c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & |
497 | ((1 << core_bits) - 1); | 497 | ((1 << core_bits) - 1); |
498 | 498 | ||
499 | out: | 499 | out: |
500 | if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) { | 500 | if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) { |
501 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", | 501 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", |
502 | c->phys_proc_id); | 502 | c->phys_proc_id); |
503 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | 503 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", |
504 | c->cpu_core_id); | 504 | c->cpu_core_id); |
505 | printed = 1; | 505 | printed = 1; |
506 | } | 506 | } |
507 | #endif | 507 | #endif |
508 | } | 508 | } |
509 | 509 | ||
510 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | 510 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) |
511 | { | 511 | { |
512 | char *v = c->x86_vendor_id; | 512 | char *v = c->x86_vendor_id; |
513 | int i; | 513 | int i; |
514 | 514 | ||
515 | for (i = 0; i < X86_VENDOR_NUM; i++) { | 515 | for (i = 0; i < X86_VENDOR_NUM; i++) { |
516 | if (!cpu_devs[i]) | 516 | if (!cpu_devs[i]) |
517 | break; | 517 | break; |
518 | 518 | ||
519 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || | 519 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || |
520 | (cpu_devs[i]->c_ident[1] && | 520 | (cpu_devs[i]->c_ident[1] && |
521 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | 521 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { |
522 | 522 | ||
523 | this_cpu = cpu_devs[i]; | 523 | this_cpu = cpu_devs[i]; |
524 | c->x86_vendor = this_cpu->c_x86_vendor; | 524 | c->x86_vendor = this_cpu->c_x86_vendor; |
525 | return; | 525 | return; |
526 | } | 526 | } |
527 | } | 527 | } |
528 | 528 | ||
529 | printk_once(KERN_ERR | 529 | printk_once(KERN_ERR |
530 | "CPU: vendor_id '%s' unknown, using generic init.\n" \ | 530 | "CPU: vendor_id '%s' unknown, using generic init.\n" \ |
531 | "CPU: Your system may be unstable.\n", v); | 531 | "CPU: Your system may be unstable.\n", v); |
532 | 532 | ||
533 | c->x86_vendor = X86_VENDOR_UNKNOWN; | 533 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
534 | this_cpu = &default_cpu; | 534 | this_cpu = &default_cpu; |
535 | } | 535 | } |
536 | 536 | ||
537 | void __cpuinit cpu_detect(struct cpuinfo_x86 *c) | 537 | void __cpuinit cpu_detect(struct cpuinfo_x86 *c) |
538 | { | 538 | { |
539 | /* Get vendor name */ | 539 | /* Get vendor name */ |
540 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | 540 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, |
541 | (unsigned int *)&c->x86_vendor_id[0], | 541 | (unsigned int *)&c->x86_vendor_id[0], |
542 | (unsigned int *)&c->x86_vendor_id[8], | 542 | (unsigned int *)&c->x86_vendor_id[8], |
543 | (unsigned int *)&c->x86_vendor_id[4]); | 543 | (unsigned int *)&c->x86_vendor_id[4]); |
544 | 544 | ||
545 | c->x86 = 4; | 545 | c->x86 = 4; |
546 | /* Intel-defined flags: level 0x00000001 */ | 546 | /* Intel-defined flags: level 0x00000001 */ |
547 | if (c->cpuid_level >= 0x00000001) { | 547 | if (c->cpuid_level >= 0x00000001) { |
548 | u32 junk, tfms, cap0, misc; | 548 | u32 junk, tfms, cap0, misc; |
549 | 549 | ||
550 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); | 550 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); |
551 | c->x86 = (tfms >> 8) & 0xf; | 551 | c->x86 = (tfms >> 8) & 0xf; |
552 | c->x86_model = (tfms >> 4) & 0xf; | 552 | c->x86_model = (tfms >> 4) & 0xf; |
553 | c->x86_mask = tfms & 0xf; | 553 | c->x86_mask = tfms & 0xf; |
554 | 554 | ||
555 | if (c->x86 == 0xf) | 555 | if (c->x86 == 0xf) |
556 | c->x86 += (tfms >> 20) & 0xff; | 556 | c->x86 += (tfms >> 20) & 0xff; |
557 | if (c->x86 >= 0x6) | 557 | if (c->x86 >= 0x6) |
558 | c->x86_model += ((tfms >> 16) & 0xf) << 4; | 558 | c->x86_model += ((tfms >> 16) & 0xf) << 4; |
559 | 559 | ||
560 | if (cap0 & (1<<19)) { | 560 | if (cap0 & (1<<19)) { |
561 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; | 561 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; |
562 | c->x86_cache_alignment = c->x86_clflush_size; | 562 | c->x86_cache_alignment = c->x86_clflush_size; |
563 | } | 563 | } |
564 | } | 564 | } |
565 | } | 565 | } |
566 | 566 | ||
567 | void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | 567 | void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) |
568 | { | 568 | { |
569 | u32 tfms, xlvl; | 569 | u32 tfms, xlvl; |
570 | u32 ebx; | 570 | u32 ebx; |
571 | 571 | ||
572 | /* Intel-defined flags: level 0x00000001 */ | 572 | /* Intel-defined flags: level 0x00000001 */ |
573 | if (c->cpuid_level >= 0x00000001) { | 573 | if (c->cpuid_level >= 0x00000001) { |
574 | u32 capability, excap; | 574 | u32 capability, excap; |
575 | 575 | ||
576 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); | 576 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); |
577 | c->x86_capability[0] = capability; | 577 | c->x86_capability[0] = capability; |
578 | c->x86_capability[4] = excap; | 578 | c->x86_capability[4] = excap; |
579 | } | 579 | } |
580 | 580 | ||
581 | /* Additional Intel-defined flags: level 0x00000007 */ | 581 | /* Additional Intel-defined flags: level 0x00000007 */ |
582 | if (c->cpuid_level >= 0x00000007) { | 582 | if (c->cpuid_level >= 0x00000007) { |
583 | u32 eax, ebx, ecx, edx; | 583 | u32 eax, ebx, ecx, edx; |
584 | 584 | ||
585 | cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); | 585 | cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); |
586 | 586 | ||
587 | c->x86_capability[9] = ebx; | 587 | c->x86_capability[9] = ebx; |
588 | } | 588 | } |
589 | 589 | ||
590 | /* AMD-defined flags: level 0x80000001 */ | 590 | /* AMD-defined flags: level 0x80000001 */ |
591 | xlvl = cpuid_eax(0x80000000); | 591 | xlvl = cpuid_eax(0x80000000); |
592 | c->extended_cpuid_level = xlvl; | 592 | c->extended_cpuid_level = xlvl; |
593 | 593 | ||
594 | if ((xlvl & 0xffff0000) == 0x80000000) { | 594 | if ((xlvl & 0xffff0000) == 0x80000000) { |
595 | if (xlvl >= 0x80000001) { | 595 | if (xlvl >= 0x80000001) { |
596 | c->x86_capability[1] = cpuid_edx(0x80000001); | 596 | c->x86_capability[1] = cpuid_edx(0x80000001); |
597 | c->x86_capability[6] = cpuid_ecx(0x80000001); | 597 | c->x86_capability[6] = cpuid_ecx(0x80000001); |
598 | } | 598 | } |
599 | } | 599 | } |
600 | 600 | ||
601 | if (c->extended_cpuid_level >= 0x80000008) { | 601 | if (c->extended_cpuid_level >= 0x80000008) { |
602 | u32 eax = cpuid_eax(0x80000008); | 602 | u32 eax = cpuid_eax(0x80000008); |
603 | 603 | ||
604 | c->x86_virt_bits = (eax >> 8) & 0xff; | 604 | c->x86_virt_bits = (eax >> 8) & 0xff; |
605 | c->x86_phys_bits = eax & 0xff; | 605 | c->x86_phys_bits = eax & 0xff; |
606 | } | 606 | } |
607 | #ifdef CONFIG_X86_32 | 607 | #ifdef CONFIG_X86_32 |
608 | else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) | 608 | else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) |
609 | c->x86_phys_bits = 36; | 609 | c->x86_phys_bits = 36; |
610 | #endif | 610 | #endif |
611 | 611 | ||
612 | if (c->extended_cpuid_level >= 0x80000007) | 612 | if (c->extended_cpuid_level >= 0x80000007) |
613 | c->x86_power = cpuid_edx(0x80000007); | 613 | c->x86_power = cpuid_edx(0x80000007); |
614 | 614 | ||
615 | init_scattered_cpuid_features(c); | 615 | init_scattered_cpuid_features(c); |
616 | } | 616 | } |
617 | 617 | ||
618 | static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) | 618 | static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) |
619 | { | 619 | { |
620 | #ifdef CONFIG_X86_32 | 620 | #ifdef CONFIG_X86_32 |
621 | int i; | 621 | int i; |
622 | 622 | ||
623 | /* | 623 | /* |
624 | * First of all, decide if this is a 486 or higher | 624 | * First of all, decide if this is a 486 or higher |
625 | * It's a 486 if we can modify the AC flag | 625 | * It's a 486 if we can modify the AC flag |
626 | */ | 626 | */ |
627 | if (flag_is_changeable_p(X86_EFLAGS_AC)) | 627 | if (flag_is_changeable_p(X86_EFLAGS_AC)) |
628 | c->x86 = 4; | 628 | c->x86 = 4; |
629 | else | 629 | else |
630 | c->x86 = 3; | 630 | c->x86 = 3; |
631 | 631 | ||
632 | for (i = 0; i < X86_VENDOR_NUM; i++) | 632 | for (i = 0; i < X86_VENDOR_NUM; i++) |
633 | if (cpu_devs[i] && cpu_devs[i]->c_identify) { | 633 | if (cpu_devs[i] && cpu_devs[i]->c_identify) { |
634 | c->x86_vendor_id[0] = 0; | 634 | c->x86_vendor_id[0] = 0; |
635 | cpu_devs[i]->c_identify(c); | 635 | cpu_devs[i]->c_identify(c); |
636 | if (c->x86_vendor_id[0]) { | 636 | if (c->x86_vendor_id[0]) { |
637 | get_cpu_vendor(c); | 637 | get_cpu_vendor(c); |
638 | break; | 638 | break; |
639 | } | 639 | } |
640 | } | 640 | } |
641 | #endif | 641 | #endif |
642 | } | 642 | } |
643 | 643 | ||
644 | /* | 644 | /* |
645 | * Do minimum CPU detection early. | 645 | * Do minimum CPU detection early. |
646 | * Fields really needed: vendor, cpuid_level, family, model, mask, | 646 | * Fields really needed: vendor, cpuid_level, family, model, mask, |
647 | * cache alignment. | 647 | * cache alignment. |
648 | * The others are not touched to avoid unwanted side effects. | 648 | * The others are not touched to avoid unwanted side effects. |
649 | * | 649 | * |
650 | * WARNING: this function is only called on the BP. Don't add code here | 650 | * WARNING: this function is only called on the BP. Don't add code here |
651 | * that is supposed to run on all CPUs. | 651 | * that is supposed to run on all CPUs. |
652 | */ | 652 | */ |
653 | static void __init early_identify_cpu(struct cpuinfo_x86 *c) | 653 | static void __init early_identify_cpu(struct cpuinfo_x86 *c) |
654 | { | 654 | { |
655 | #ifdef CONFIG_X86_64 | 655 | #ifdef CONFIG_X86_64 |
656 | c->x86_clflush_size = 64; | 656 | c->x86_clflush_size = 64; |
657 | c->x86_phys_bits = 36; | 657 | c->x86_phys_bits = 36; |
658 | c->x86_virt_bits = 48; | 658 | c->x86_virt_bits = 48; |
659 | #else | 659 | #else |
660 | c->x86_clflush_size = 32; | 660 | c->x86_clflush_size = 32; |
661 | c->x86_phys_bits = 32; | 661 | c->x86_phys_bits = 32; |
662 | c->x86_virt_bits = 32; | 662 | c->x86_virt_bits = 32; |
663 | #endif | 663 | #endif |
664 | c->x86_cache_alignment = c->x86_clflush_size; | 664 | c->x86_cache_alignment = c->x86_clflush_size; |
665 | 665 | ||
666 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | 666 | memset(&c->x86_capability, 0, sizeof c->x86_capability); |
667 | c->extended_cpuid_level = 0; | 667 | c->extended_cpuid_level = 0; |
668 | 668 | ||
669 | if (!have_cpuid_p()) | 669 | if (!have_cpuid_p()) |
670 | identify_cpu_without_cpuid(c); | 670 | identify_cpu_without_cpuid(c); |
671 | 671 | ||
672 | /* cyrix could have cpuid enabled via c_identify()*/ | 672 | /* cyrix could have cpuid enabled via c_identify()*/ |
673 | if (!have_cpuid_p()) | 673 | if (!have_cpuid_p()) |
674 | return; | 674 | return; |
675 | 675 | ||
676 | cpu_detect(c); | 676 | cpu_detect(c); |
677 | 677 | ||
678 | get_cpu_vendor(c); | 678 | get_cpu_vendor(c); |
679 | 679 | ||
680 | get_cpu_cap(c); | 680 | get_cpu_cap(c); |
681 | 681 | ||
682 | if (this_cpu->c_early_init) | 682 | if (this_cpu->c_early_init) |
683 | this_cpu->c_early_init(c); | 683 | this_cpu->c_early_init(c); |
684 | 684 | ||
685 | #ifdef CONFIG_SMP | 685 | #ifdef CONFIG_SMP |
686 | c->cpu_index = 0; | 686 | c->cpu_index = 0; |
687 | #endif | 687 | #endif |
688 | filter_cpuid_features(c, false); | 688 | filter_cpuid_features(c, false); |
689 | 689 | ||
690 | setup_smep(c); | 690 | setup_smep(c); |
691 | } | 691 | } |
692 | 692 | ||
693 | void __init early_cpu_init(void) | 693 | void __init early_cpu_init(void) |
694 | { | 694 | { |
695 | const struct cpu_dev *const *cdev; | 695 | const struct cpu_dev *const *cdev; |
696 | int count = 0; | 696 | int count = 0; |
697 | 697 | ||
698 | #ifdef CONFIG_PROCESSOR_SELECT | 698 | #ifdef CONFIG_PROCESSOR_SELECT |
699 | printk(KERN_INFO "KERNEL supported cpus:\n"); | 699 | printk(KERN_INFO "KERNEL supported cpus:\n"); |
700 | #endif | 700 | #endif |
701 | 701 | ||
702 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { | 702 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { |
703 | const struct cpu_dev *cpudev = *cdev; | 703 | const struct cpu_dev *cpudev = *cdev; |
704 | 704 | ||
705 | if (count >= X86_VENDOR_NUM) | 705 | if (count >= X86_VENDOR_NUM) |
706 | break; | 706 | break; |
707 | cpu_devs[count] = cpudev; | 707 | cpu_devs[count] = cpudev; |
708 | count++; | 708 | count++; |
709 | 709 | ||
710 | #ifdef CONFIG_PROCESSOR_SELECT | 710 | #ifdef CONFIG_PROCESSOR_SELECT |
711 | { | 711 | { |
712 | unsigned int j; | 712 | unsigned int j; |
713 | 713 | ||
714 | for (j = 0; j < 2; j++) { | 714 | for (j = 0; j < 2; j++) { |
715 | if (!cpudev->c_ident[j]) | 715 | if (!cpudev->c_ident[j]) |
716 | continue; | 716 | continue; |
717 | printk(KERN_INFO " %s %s\n", cpudev->c_vendor, | 717 | printk(KERN_INFO " %s %s\n", cpudev->c_vendor, |
718 | cpudev->c_ident[j]); | 718 | cpudev->c_ident[j]); |
719 | } | 719 | } |
720 | } | 720 | } |
721 | #endif | 721 | #endif |
722 | } | 722 | } |
723 | early_identify_cpu(&boot_cpu_data); | 723 | early_identify_cpu(&boot_cpu_data); |
724 | } | 724 | } |
725 | 725 | ||
726 | /* | 726 | /* |
727 | * The NOPL instruction is supposed to exist on all CPUs of family >= 6; | 727 | * The NOPL instruction is supposed to exist on all CPUs of family >= 6; |
728 | * unfortunately, that's not true in practice because of early VIA | 728 | * unfortunately, that's not true in practice because of early VIA |
729 | * chips and (more importantly) broken virtualizers that are not easy | 729 | * chips and (more importantly) broken virtualizers that are not easy |
730 | * to detect. In the latter case it doesn't even *fail* reliably, so | 730 | * to detect. In the latter case it doesn't even *fail* reliably, so |
731 | * probing for it doesn't even work. Disable it completely on 32-bit | 731 | * probing for it doesn't even work. Disable it completely on 32-bit |
732 | * unless we can find a reliable way to detect all the broken cases. | 732 | * unless we can find a reliable way to detect all the broken cases. |
733 | * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). | 733 | * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). |
734 | */ | 734 | */ |
735 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | 735 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) |
736 | { | 736 | { |
737 | #ifdef CONFIG_X86_32 | 737 | #ifdef CONFIG_X86_32 |
738 | clear_cpu_cap(c, X86_FEATURE_NOPL); | 738 | clear_cpu_cap(c, X86_FEATURE_NOPL); |
739 | #else | 739 | #else |
740 | set_cpu_cap(c, X86_FEATURE_NOPL); | 740 | set_cpu_cap(c, X86_FEATURE_NOPL); |
741 | #endif | 741 | #endif |
742 | } | 742 | } |
743 | 743 | ||
744 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 744 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) |
745 | { | 745 | { |
746 | c->extended_cpuid_level = 0; | 746 | c->extended_cpuid_level = 0; |
747 | 747 | ||
748 | if (!have_cpuid_p()) | 748 | if (!have_cpuid_p()) |
749 | identify_cpu_without_cpuid(c); | 749 | identify_cpu_without_cpuid(c); |
750 | 750 | ||
751 | /* cyrix could have cpuid enabled via c_identify()*/ | 751 | /* cyrix could have cpuid enabled via c_identify()*/ |
752 | if (!have_cpuid_p()) | 752 | if (!have_cpuid_p()) |
753 | return; | 753 | return; |
754 | 754 | ||
755 | cpu_detect(c); | 755 | cpu_detect(c); |
756 | 756 | ||
757 | get_cpu_vendor(c); | 757 | get_cpu_vendor(c); |
758 | 758 | ||
759 | get_cpu_cap(c); | 759 | get_cpu_cap(c); |
760 | 760 | ||
761 | if (c->cpuid_level >= 0x00000001) { | 761 | if (c->cpuid_level >= 0x00000001) { |
762 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; | 762 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; |
763 | #ifdef CONFIG_X86_32 | 763 | #ifdef CONFIG_X86_32 |
764 | # ifdef CONFIG_X86_HT | 764 | # ifdef CONFIG_X86_HT |
765 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); | 765 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
766 | # else | 766 | # else |
767 | c->apicid = c->initial_apicid; | 767 | c->apicid = c->initial_apicid; |
768 | # endif | 768 | # endif |
769 | #endif | 769 | #endif |
770 | 770 | ||
771 | #ifdef CONFIG_X86_HT | 771 | #ifdef CONFIG_X86_HT |
772 | c->phys_proc_id = c->initial_apicid; | 772 | c->phys_proc_id = c->initial_apicid; |
773 | #endif | 773 | #endif |
774 | } | 774 | } |
775 | 775 | ||
776 | setup_smep(c); | 776 | setup_smep(c); |
777 | 777 | ||
778 | get_model_name(c); /* Default name */ | 778 | get_model_name(c); /* Default name */ |
779 | 779 | ||
780 | detect_nopl(c); | 780 | detect_nopl(c); |
781 | } | 781 | } |
782 | 782 | ||
783 | /* | 783 | /* |
784 | * This does the hard work of actually picking apart the CPU stuff... | 784 | * This does the hard work of actually picking apart the CPU stuff... |
785 | */ | 785 | */ |
786 | static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | 786 | static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) |
787 | { | 787 | { |
788 | int i; | 788 | int i; |
789 | 789 | ||
790 | c->loops_per_jiffy = loops_per_jiffy; | 790 | c->loops_per_jiffy = loops_per_jiffy; |
791 | c->x86_cache_size = -1; | 791 | c->x86_cache_size = -1; |
792 | c->x86_vendor = X86_VENDOR_UNKNOWN; | 792 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
793 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ | 793 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ |
794 | c->x86_vendor_id[0] = '\0'; /* Unset */ | 794 | c->x86_vendor_id[0] = '\0'; /* Unset */ |
795 | c->x86_model_id[0] = '\0'; /* Unset */ | 795 | c->x86_model_id[0] = '\0'; /* Unset */ |
796 | c->x86_max_cores = 1; | 796 | c->x86_max_cores = 1; |
797 | c->x86_coreid_bits = 0; | 797 | c->x86_coreid_bits = 0; |
798 | #ifdef CONFIG_X86_64 | 798 | #ifdef CONFIG_X86_64 |
799 | c->x86_clflush_size = 64; | 799 | c->x86_clflush_size = 64; |
800 | c->x86_phys_bits = 36; | 800 | c->x86_phys_bits = 36; |
801 | c->x86_virt_bits = 48; | 801 | c->x86_virt_bits = 48; |
802 | #else | 802 | #else |
803 | c->cpuid_level = -1; /* CPUID not detected */ | 803 | c->cpuid_level = -1; /* CPUID not detected */ |
804 | c->x86_clflush_size = 32; | 804 | c->x86_clflush_size = 32; |
805 | c->x86_phys_bits = 32; | 805 | c->x86_phys_bits = 32; |
806 | c->x86_virt_bits = 32; | 806 | c->x86_virt_bits = 32; |
807 | #endif | 807 | #endif |
808 | c->x86_cache_alignment = c->x86_clflush_size; | 808 | c->x86_cache_alignment = c->x86_clflush_size; |
809 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | 809 | memset(&c->x86_capability, 0, sizeof c->x86_capability); |
810 | 810 | ||
811 | generic_identify(c); | 811 | generic_identify(c); |
812 | 812 | ||
813 | if (this_cpu->c_identify) | 813 | if (this_cpu->c_identify) |
814 | this_cpu->c_identify(c); | 814 | this_cpu->c_identify(c); |
815 | 815 | ||
816 | /* Clear/Set all flags overriden by options, after probe */ | 816 | /* Clear/Set all flags overriden by options, after probe */ |
817 | for (i = 0; i < NCAPINTS; i++) { | 817 | for (i = 0; i < NCAPINTS; i++) { |
818 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; | 818 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; |
819 | c->x86_capability[i] |= cpu_caps_set[i]; | 819 | c->x86_capability[i] |= cpu_caps_set[i]; |
820 | } | 820 | } |
821 | 821 | ||
822 | #ifdef CONFIG_X86_64 | 822 | #ifdef CONFIG_X86_64 |
823 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); | 823 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
824 | #endif | 824 | #endif |
825 | 825 | ||
826 | /* | 826 | /* |
827 | * Vendor-specific initialization. In this section we | 827 | * Vendor-specific initialization. In this section we |
828 | * canonicalize the feature flags, meaning if there are | 828 | * canonicalize the feature flags, meaning if there are |
829 | * features a certain CPU supports which CPUID doesn't | 829 | * features a certain CPU supports which CPUID doesn't |
830 | * tell us, CPUID claiming incorrect flags, or other bugs, | 830 | * tell us, CPUID claiming incorrect flags, or other bugs, |
831 | * we handle them here. | 831 | * we handle them here. |
832 | * | 832 | * |
833 | * At the end of this section, c->x86_capability better | 833 | * At the end of this section, c->x86_capability better |
834 | * indicate the features this CPU genuinely supports! | 834 | * indicate the features this CPU genuinely supports! |
835 | */ | 835 | */ |
836 | if (this_cpu->c_init) | 836 | if (this_cpu->c_init) |
837 | this_cpu->c_init(c); | 837 | this_cpu->c_init(c); |
838 | 838 | ||
839 | /* Disable the PN if appropriate */ | 839 | /* Disable the PN if appropriate */ |
840 | squash_the_stupid_serial_number(c); | 840 | squash_the_stupid_serial_number(c); |
841 | 841 | ||
842 | /* | 842 | /* |
843 | * The vendor-specific functions might have changed features. | 843 | * The vendor-specific functions might have changed features. |
844 | * Now we do "generic changes." | 844 | * Now we do "generic changes." |
845 | */ | 845 | */ |
846 | 846 | ||
847 | /* Filter out anything that depends on CPUID levels we don't have */ | 847 | /* Filter out anything that depends on CPUID levels we don't have */ |
848 | filter_cpuid_features(c, true); | 848 | filter_cpuid_features(c, true); |
849 | 849 | ||
850 | /* If the model name is still unset, do table lookup. */ | 850 | /* If the model name is still unset, do table lookup. */ |
851 | if (!c->x86_model_id[0]) { | 851 | if (!c->x86_model_id[0]) { |
852 | const char *p; | 852 | const char *p; |
853 | p = table_lookup_model(c); | 853 | p = table_lookup_model(c); |
854 | if (p) | 854 | if (p) |
855 | strcpy(c->x86_model_id, p); | 855 | strcpy(c->x86_model_id, p); |
856 | else | 856 | else |
857 | /* Last resort... */ | 857 | /* Last resort... */ |
858 | sprintf(c->x86_model_id, "%02x/%02x", | 858 | sprintf(c->x86_model_id, "%02x/%02x", |
859 | c->x86, c->x86_model); | 859 | c->x86, c->x86_model); |
860 | } | 860 | } |
861 | 861 | ||
862 | #ifdef CONFIG_X86_64 | 862 | #ifdef CONFIG_X86_64 |
863 | detect_ht(c); | 863 | detect_ht(c); |
864 | #endif | 864 | #endif |
865 | 865 | ||
866 | init_hypervisor(c); | 866 | init_hypervisor(c); |
867 | 867 | ||
868 | /* | 868 | /* |
869 | * Clear/Set all flags overriden by options, need do it | 869 | * Clear/Set all flags overriden by options, need do it |
870 | * before following smp all cpus cap AND. | 870 | * before following smp all cpus cap AND. |
871 | */ | 871 | */ |
872 | for (i = 0; i < NCAPINTS; i++) { | 872 | for (i = 0; i < NCAPINTS; i++) { |
873 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; | 873 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; |
874 | c->x86_capability[i] |= cpu_caps_set[i]; | 874 | c->x86_capability[i] |= cpu_caps_set[i]; |
875 | } | 875 | } |
876 | 876 | ||
877 | /* | 877 | /* |
878 | * On SMP, boot_cpu_data holds the common feature set between | 878 | * On SMP, boot_cpu_data holds the common feature set between |
879 | * all CPUs; so make sure that we indicate which features are | 879 | * all CPUs; so make sure that we indicate which features are |
880 | * common between the CPUs. The first time this routine gets | 880 | * common between the CPUs. The first time this routine gets |
881 | * executed, c == &boot_cpu_data. | 881 | * executed, c == &boot_cpu_data. |
882 | */ | 882 | */ |
883 | if (c != &boot_cpu_data) { | 883 | if (c != &boot_cpu_data) { |
884 | /* AND the already accumulated flags with these */ | 884 | /* AND the already accumulated flags with these */ |
885 | for (i = 0; i < NCAPINTS; i++) | 885 | for (i = 0; i < NCAPINTS; i++) |
886 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; | 886 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
887 | } | 887 | } |
888 | 888 | ||
889 | /* Init Machine Check Exception if available. */ | 889 | /* Init Machine Check Exception if available. */ |
890 | mcheck_cpu_init(c); | 890 | mcheck_cpu_init(c); |
891 | 891 | ||
892 | select_idle_routine(c); | 892 | select_idle_routine(c); |
893 | 893 | ||
894 | #ifdef CONFIG_NUMA | 894 | #ifdef CONFIG_NUMA |
895 | numa_add_cpu(smp_processor_id()); | 895 | numa_add_cpu(smp_processor_id()); |
896 | #endif | 896 | #endif |
897 | } | 897 | } |
898 | 898 | ||
899 | #ifdef CONFIG_X86_64 | 899 | #ifdef CONFIG_X86_64 |
900 | static void vgetcpu_set_mode(void) | 900 | static void vgetcpu_set_mode(void) |
901 | { | 901 | { |
902 | if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP)) | 902 | if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP)) |
903 | vgetcpu_mode = VGETCPU_RDTSCP; | 903 | vgetcpu_mode = VGETCPU_RDTSCP; |
904 | else | 904 | else |
905 | vgetcpu_mode = VGETCPU_LSL; | 905 | vgetcpu_mode = VGETCPU_LSL; |
906 | } | 906 | } |
907 | #endif | 907 | #endif |
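For reference, vgetcpu_mode selects how the vgetcpu() vsyscall recovers the current CPU and node in userspace: either via RDTSCP or via an LSL on a per-CPU GDT segment limit. A rough sketch of the RDTSCP path, assuming the (node << 12) | cpu encoding the kernel programs into TSC_AUX (an illustration, not code from this file):

	/* Illustration only; assumes TSC_AUX holds (node << 12) | cpu. */
	static inline void vgetcpu_rdtscp_sketch(unsigned int *cpu, unsigned int *node)
	{
		unsigned int eax, edx, aux;

		asm volatile("rdtscp" : "=a" (eax), "=d" (edx), "=c" (aux));
		*cpu  = aux & 0xfff;	/* low 12 bits: CPU number */
		*node = aux >> 12;	/* upper bits: NUMA node   */
	}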
908 | 908 | ||
909 | void __init identify_boot_cpu(void) | 909 | void __init identify_boot_cpu(void) |
910 | { | 910 | { |
911 | identify_cpu(&boot_cpu_data); | 911 | identify_cpu(&boot_cpu_data); |
912 | init_c1e_mask(); | 912 | init_c1e_mask(); |
913 | #ifdef CONFIG_X86_32 | 913 | #ifdef CONFIG_X86_32 |
914 | sysenter_setup(); | 914 | sysenter_setup(); |
915 | enable_sep_cpu(); | 915 | enable_sep_cpu(); |
916 | #else | 916 | #else |
917 | vgetcpu_set_mode(); | 917 | vgetcpu_set_mode(); |
918 | #endif | 918 | #endif |
919 | } | 919 | } |
920 | 920 | ||
921 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | 921 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) |
922 | { | 922 | { |
923 | BUG_ON(c == &boot_cpu_data); | 923 | BUG_ON(c == &boot_cpu_data); |
924 | identify_cpu(c); | 924 | identify_cpu(c); |
925 | #ifdef CONFIG_X86_32 | 925 | #ifdef CONFIG_X86_32 |
926 | enable_sep_cpu(); | 926 | enable_sep_cpu(); |
927 | #endif | 927 | #endif |
928 | mtrr_ap_init(); | 928 | mtrr_ap_init(); |
929 | } | 929 | } |
930 | 930 | ||
931 | struct msr_range { | 931 | struct msr_range { |
932 | unsigned min; | 932 | unsigned min; |
933 | unsigned max; | 933 | unsigned max; |
934 | }; | 934 | }; |
935 | 935 | ||
936 | static const struct msr_range msr_range_array[] __cpuinitconst = { | 936 | static const struct msr_range msr_range_array[] __cpuinitconst = { |
937 | { 0x00000000, 0x00000418}, | 937 | { 0x00000000, 0x00000418}, |
938 | { 0xc0000000, 0xc000040b}, | 938 | { 0xc0000000, 0xc000040b}, |
939 | { 0xc0010000, 0xc0010142}, | 939 | { 0xc0010000, 0xc0010142}, |
940 | { 0xc0011000, 0xc001103b}, | 940 | { 0xc0011000, 0xc001103b}, |
941 | }; | 941 | }; |
942 | 942 | ||
943 | static void __cpuinit print_cpu_msr(void) | 943 | static void __cpuinit print_cpu_msr(void) |
944 | { | 944 | { |
945 | unsigned index_min, index_max; | 945 | unsigned index_min, index_max; |
946 | unsigned index; | 946 | unsigned index; |
947 | u64 val; | 947 | u64 val; |
948 | int i; | 948 | int i; |
949 | 949 | ||
950 | for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { | 950 | for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { |
951 | index_min = msr_range_array[i].min; | 951 | index_min = msr_range_array[i].min; |
952 | index_max = msr_range_array[i].max; | 952 | index_max = msr_range_array[i].max; |
953 | 953 | ||
954 | for (index = index_min; index < index_max; index++) { | 954 | for (index = index_min; index < index_max; index++) { |
955 | if (rdmsrl_amd_safe(index, &val)) | 955 | if (rdmsrl_amd_safe(index, &val)) |
956 | continue; | 956 | continue; |
957 | printk(KERN_INFO " MSR%08x: %016llx\n", index, val); | 957 | printk(KERN_INFO " MSR%08x: %016llx\n", index, val); |
958 | } | 958 | } |
959 | } | 959 | } |
960 | } | 960 | } |
961 | 961 | ||
962 | static int show_msr __cpuinitdata; | 962 | static int show_msr __cpuinitdata; |
963 | 963 | ||
964 | static __init int setup_show_msr(char *arg) | 964 | static __init int setup_show_msr(char *arg) |
965 | { | 965 | { |
966 | int num; | 966 | int num; |
967 | 967 | ||
968 | get_option(&arg, &num); | 968 | get_option(&arg, &num); |
969 | 969 | ||
970 | if (num > 0) | 970 | if (num > 0) |
971 | show_msr = num; | 971 | show_msr = num; |
972 | return 1; | 972 | return 1; |
973 | } | 973 | } |
974 | __setup("show_msr=", setup_show_msr); | 974 | __setup("show_msr=", setup_show_msr); |
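The parsed value is compared against the CPU index in print_cpu_info() further down, so booting with, say, show_msr=2 dumps the MSR ranges above for the first two CPUs on an SMP kernel; on UP, any non-zero value enables the dump.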
975 | 975 | ||
976 | static __init int setup_noclflush(char *arg) | 976 | static __init int setup_noclflush(char *arg) |
977 | { | 977 | { |
978 | setup_clear_cpu_cap(X86_FEATURE_CLFLSH); | 978 | setup_clear_cpu_cap(X86_FEATURE_CLFLSH); |
979 | return 1; | 979 | return 1; |
980 | } | 980 | } |
981 | __setup("noclflush", setup_noclflush); | 981 | __setup("noclflush", setup_noclflush); |
982 | 982 | ||
983 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | 983 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) |
984 | { | 984 | { |
985 | const char *vendor = NULL; | 985 | const char *vendor = NULL; |
986 | 986 | ||
987 | if (c->x86_vendor < X86_VENDOR_NUM) { | 987 | if (c->x86_vendor < X86_VENDOR_NUM) { |
988 | vendor = this_cpu->c_vendor; | 988 | vendor = this_cpu->c_vendor; |
989 | } else { | 989 | } else { |
990 | if (c->cpuid_level >= 0) | 990 | if (c->cpuid_level >= 0) |
991 | vendor = c->x86_vendor_id; | 991 | vendor = c->x86_vendor_id; |
992 | } | 992 | } |
993 | 993 | ||
994 | if (vendor && !strstr(c->x86_model_id, vendor)) | 994 | if (vendor && !strstr(c->x86_model_id, vendor)) |
995 | printk(KERN_CONT "%s ", vendor); | 995 | printk(KERN_CONT "%s ", vendor); |
996 | 996 | ||
997 | if (c->x86_model_id[0]) | 997 | if (c->x86_model_id[0]) |
998 | printk(KERN_CONT "%s", c->x86_model_id); | 998 | printk(KERN_CONT "%s", c->x86_model_id); |
999 | else | 999 | else |
1000 | printk(KERN_CONT "%d86", c->x86); | 1000 | printk(KERN_CONT "%d86", c->x86); |
1001 | 1001 | ||
1002 | if (c->x86_mask || c->cpuid_level >= 0) | 1002 | if (c->x86_mask || c->cpuid_level >= 0) |
1003 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); | 1003 | printk(KERN_CONT " stepping %02x\n", c->x86_mask); |
1004 | else | 1004 | else |
1005 | printk(KERN_CONT "\n"); | 1005 | printk(KERN_CONT "\n"); |
1006 | 1006 | ||
1007 | #ifdef CONFIG_SMP | 1007 | #ifdef CONFIG_SMP |
1008 | if (c->cpu_index < show_msr) | 1008 | if (c->cpu_index < show_msr) |
1009 | print_cpu_msr(); | 1009 | print_cpu_msr(); |
1010 | #else | 1010 | #else |
1011 | if (show_msr) | 1011 | if (show_msr) |
1012 | print_cpu_msr(); | 1012 | print_cpu_msr(); |
1013 | #endif | 1013 | #endif |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | static __init int setup_disablecpuid(char *arg) | 1016 | static __init int setup_disablecpuid(char *arg) |
1017 | { | 1017 | { |
1018 | int bit; | 1018 | int bit; |
1019 | 1019 | ||
1020 | if (get_option(&arg, &bit) && bit < NCAPINTS*32) | 1020 | if (get_option(&arg, &bit) && bit < NCAPINTS*32) |
1021 | setup_clear_cpu_cap(bit); | 1021 | setup_clear_cpu_cap(bit); |
1022 | else | 1022 | else |
1023 | return 0; | 1023 | return 0; |
1024 | 1024 | ||
1025 | return 1; | 1025 | return 1; |
1026 | } | 1026 | } |
1027 | __setup("clearcpuid=", setup_disablecpuid); | 1027 | __setup("clearcpuid=", setup_disablecpuid); |
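clearcpuid= takes a raw capability-bit number (32 * word + bit, anything below NCAPINTS*32). As an example, and assuming the usual cpufeature.h numbering where X86_FEATURE_SEP is bit 11 of word 0, booting with clearcpuid=11 would hide SYSENTER support from the rest of the boot.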
1028 | 1028 | ||
1029 | #ifdef CONFIG_X86_64 | 1029 | #ifdef CONFIG_X86_64 |
1030 | struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; | 1030 | struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; |
1031 | 1031 | ||
1032 | DEFINE_PER_CPU_FIRST(union irq_stack_union, | 1032 | DEFINE_PER_CPU_FIRST(union irq_stack_union, |
1033 | irq_stack_union) __aligned(PAGE_SIZE); | 1033 | irq_stack_union) __aligned(PAGE_SIZE); |
1034 | 1034 | ||
1035 | /* | 1035 | /* |
1036 | * The following four percpu variables are hot. Align current_task to | 1036 | * The following four percpu variables are hot. Align current_task to |
1037 | * cacheline size such that all four fall in the same cacheline. | 1037 | * cacheline size such that all four fall in the same cacheline. |
1038 | */ | 1038 | */ |
1039 | DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = | 1039 | DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = |
1040 | &init_task; | 1040 | &init_task; |
1041 | EXPORT_PER_CPU_SYMBOL(current_task); | 1041 | EXPORT_PER_CPU_SYMBOL(current_task); |
1042 | 1042 | ||
1043 | DEFINE_PER_CPU(unsigned long, kernel_stack) = | 1043 | DEFINE_PER_CPU(unsigned long, kernel_stack) = |
1044 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; | 1044 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; |
1045 | EXPORT_PER_CPU_SYMBOL(kernel_stack); | 1045 | EXPORT_PER_CPU_SYMBOL(kernel_stack); |
1046 | 1046 | ||
1047 | DEFINE_PER_CPU(char *, irq_stack_ptr) = | 1047 | DEFINE_PER_CPU(char *, irq_stack_ptr) = |
1048 | init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; | 1048 | init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; |
1049 | 1049 | ||
1050 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; | 1050 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; |
1051 | 1051 | ||
1052 | /* | 1052 | /* |
1053 | * Special IST stacks which the CPU switches to when it calls | 1053 | * Special IST stacks which the CPU switches to when it calls |
1054 | * an IST-marked descriptor entry. Up to 7 stacks (hardware | 1054 | * an IST-marked descriptor entry. Up to 7 stacks (hardware |
1055 | * limit); all of them are 4K, except the debug stack, which | 1055 | * limit); all of them are 4K, except the debug stack, which |
1056 | * is 8K. | 1056 | * is 8K. |
1057 | */ | 1057 | */ |
1058 | static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { | 1058 | static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { |
1059 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, | 1059 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, |
1060 | [DEBUG_STACK - 1] = DEBUG_STKSZ | 1060 | [DEBUG_STACK - 1] = DEBUG_STKSZ |
1061 | }; | 1061 | }; |
1062 | 1062 | ||
1063 | static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks | 1063 | static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks |
1064 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); | 1064 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); |
1065 | 1065 | ||
1066 | /* May not be marked __init: used by software suspend */ | 1066 | /* May not be marked __init: used by software suspend */ |
1067 | void syscall_init(void) | 1067 | void syscall_init(void) |
1068 | { | 1068 | { |
1069 | /* | 1069 | /* |
1070 | * LSTAR and STAR live in a somewhat strange symbiosis. | 1070 | * LSTAR and STAR live in a somewhat strange symbiosis. |
1071 | * They both write to the same internal register. STAR allows | 1071 | * They both write to the same internal register. STAR allows |
1072 | * setting CS/DS, but only a 32-bit target; LSTAR sets the 64-bit rip. | 1072 | * setting CS/DS, but only a 32-bit target; LSTAR sets the 64-bit rip. |
1073 | */ | 1073 | */ |
1074 | wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); | 1074 | wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); |
1075 | wrmsrl(MSR_LSTAR, system_call); | 1075 | wrmsrl(MSR_LSTAR, system_call); |
1076 | wrmsrl(MSR_CSTAR, ignore_sysret); | 1076 | wrmsrl(MSR_CSTAR, ignore_sysret); |
1077 | 1077 | ||
1078 | #ifdef CONFIG_IA32_EMULATION | 1078 | #ifdef CONFIG_IA32_EMULATION |
1079 | syscall32_cpu_init(); | 1079 | syscall32_cpu_init(); |
1080 | #endif | 1080 | #endif |
1081 | 1081 | ||
1082 | /* Flags to clear on syscall */ | 1082 | /* Flags to clear on syscall */ |
1083 | wrmsrl(MSR_SYSCALL_MASK, | 1083 | wrmsrl(MSR_SYSCALL_MASK, |
1084 | X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL); | 1084 | X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL); |
1085 | } | 1085 | } |
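As a side note, the single MSR_STAR write above packs two selector bases into one register. A minimal sketch of how the hardware consumes that value, per the architectural SYSCALL/SYSRET definition (not code from this file):

	/* Sketch only: decomposition of the STAR value written in syscall_init(). */
	u64 star = ((u64)__USER32_CS << 48) | ((u64)__KERNEL_CS << 32);

	/* SYSCALL:           CS = STAR[47:32],      SS = STAR[47:32] + 8 */
	/* SYSRET to 32-bit:  CS = STAR[63:48],      SS = STAR[63:48] + 8 */
	/* SYSRET to 64-bit:  CS = STAR[63:48] + 16, SS = STAR[63:48] + 8 */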
1086 | 1086 | ||
1087 | unsigned long kernel_eflags; | 1087 | unsigned long kernel_eflags; |
1088 | 1088 | ||
1089 | /* | 1089 | /* |
1090 | * Copies of the original ist values from the tss are only accessed during | 1090 | * Copies of the original ist values from the tss are only accessed during |
1091 | * debugging; no special alignment is required. | 1091 | * debugging; no special alignment is required. |
1092 | */ | 1092 | */ |
1093 | DEFINE_PER_CPU(struct orig_ist, orig_ist); | 1093 | DEFINE_PER_CPU(struct orig_ist, orig_ist); |
1094 | 1094 | ||
1095 | #else /* CONFIG_X86_64 */ | 1095 | #else /* CONFIG_X86_64 */ |
1096 | 1096 | ||
1097 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; | 1097 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; |
1098 | EXPORT_PER_CPU_SYMBOL(current_task); | 1098 | EXPORT_PER_CPU_SYMBOL(current_task); |
1099 | 1099 | ||
1100 | #ifdef CONFIG_CC_STACKPROTECTOR | 1100 | #ifdef CONFIG_CC_STACKPROTECTOR |
1101 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); | 1101 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
1102 | #endif | 1102 | #endif |
1103 | 1103 | ||
1104 | /* Make sure %fs and %gs are initialized properly in idle threads */ | 1104 | /* Make sure %fs and %gs are initialized properly in idle threads */ |
1105 | struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) | 1105 | struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) |
1106 | { | 1106 | { |
1107 | memset(regs, 0, sizeof(struct pt_regs)); | 1107 | memset(regs, 0, sizeof(struct pt_regs)); |
1108 | regs->fs = __KERNEL_PERCPU; | 1108 | regs->fs = __KERNEL_PERCPU; |
1109 | regs->gs = __KERNEL_STACK_CANARY; | 1109 | regs->gs = __KERNEL_STACK_CANARY; |
1110 | 1110 | ||
1111 | return regs; | 1111 | return regs; |
1112 | } | 1112 | } |
1113 | #endif /* CONFIG_X86_64 */ | 1113 | #endif /* CONFIG_X86_64 */ |
1114 | 1114 | ||
1115 | /* | 1115 | /* |
1116 | * Clear all 6 debug registers: | 1116 | * Clear all 6 debug registers: |
1117 | */ | 1117 | */ |
1118 | static void clear_all_debug_regs(void) | 1118 | static void clear_all_debug_regs(void) |
1119 | { | 1119 | { |
1120 | int i; | 1120 | int i; |
1121 | 1121 | ||
1122 | for (i = 0; i < 8; i++) { | 1122 | for (i = 0; i < 8; i++) { |
1123 | /* Ignore db4, db5 */ | 1123 | /* Ignore db4, db5 */ |
1124 | if ((i == 4) || (i == 5)) | 1124 | if ((i == 4) || (i == 5)) |
1125 | continue; | 1125 | continue; |
1126 | 1126 | ||
1127 | set_debugreg(0, i); | 1127 | set_debugreg(0, i); |
1128 | } | 1128 | } |
1129 | } | 1129 | } |
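DB4 and DB5 are skipped because the architecture reserves them: depending on CR4.DE they either alias DR6/DR7 or fault on access, so only the six real debug registers (DR0-DR3, DR6, DR7) are cleared here.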
1130 | 1130 | ||
1131 | #ifdef CONFIG_KGDB | 1131 | #ifdef CONFIG_KGDB |
1132 | /* | 1132 | /* |
1133 | * Restore debug regs if using kgdbwait and you have a kernel debugger | 1133 | * Restore debug regs if using kgdbwait and you have a kernel debugger |
1134 | * connection established. | 1134 | * connection established. |
1135 | */ | 1135 | */ |
1136 | static void dbg_restore_debug_regs(void) | 1136 | static void dbg_restore_debug_regs(void) |
1137 | { | 1137 | { |
1138 | if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) | 1138 | if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) |
1139 | arch_kgdb_ops.correct_hw_break(); | 1139 | arch_kgdb_ops.correct_hw_break(); |
1140 | } | 1140 | } |
1141 | #else /* ! CONFIG_KGDB */ | 1141 | #else /* ! CONFIG_KGDB */ |
1142 | #define dbg_restore_debug_regs() | 1142 | #define dbg_restore_debug_regs() |
1143 | #endif /* ! CONFIG_KGDB */ | 1143 | #endif /* ! CONFIG_KGDB */ |
1144 | 1144 | ||
1145 | /* | 1145 | /* |
1146 | * cpu_init() initializes state that is per-CPU. Some data is already | 1146 | * cpu_init() initializes state that is per-CPU. Some data is already |
1147 | * initialized (naturally) in the bootstrap process, such as the GDT | 1147 | * initialized (naturally) in the bootstrap process, such as the GDT |
1148 | * and IDT. We reload them nevertheless; this function acts as a | 1148 | * and IDT. We reload them nevertheless; this function acts as a |
1149 | * 'CPU state barrier': nothing should get across. | 1149 | * 'CPU state barrier': nothing should get across. |
1150 | * A lot of state is already set up in PDA init for 64-bit. | 1150 | * A lot of state is already set up in PDA init for 64-bit. |
1151 | */ | 1151 | */ |
1152 | #ifdef CONFIG_X86_64 | 1152 | #ifdef CONFIG_X86_64 |
1153 | 1153 | ||
1154 | void __cpuinit cpu_init(void) | 1154 | void __cpuinit cpu_init(void) |
1155 | { | 1155 | { |
1156 | struct orig_ist *oist; | 1156 | struct orig_ist *oist; |
1157 | struct task_struct *me; | 1157 | struct task_struct *me; |
1158 | struct tss_struct *t; | 1158 | struct tss_struct *t; |
1159 | unsigned long v; | 1159 | unsigned long v; |
1160 | int cpu; | 1160 | int cpu; |
1161 | int i; | 1161 | int i; |
1162 | 1162 | ||
1163 | cpu = stack_smp_processor_id(); | 1163 | cpu = stack_smp_processor_id(); |
1164 | t = &per_cpu(init_tss, cpu); | 1164 | t = &per_cpu(init_tss, cpu); |
1165 | oist = &per_cpu(orig_ist, cpu); | 1165 | oist = &per_cpu(orig_ist, cpu); |
1166 | 1166 | ||
1167 | #ifdef CONFIG_NUMA | 1167 | #ifdef CONFIG_NUMA |
1168 | if (cpu != 0 && percpu_read(numa_node) == 0 && | 1168 | if (cpu != 0 && percpu_read(numa_node) == 0 && |
1169 | early_cpu_to_node(cpu) != NUMA_NO_NODE) | 1169 | early_cpu_to_node(cpu) != NUMA_NO_NODE) |
1170 | set_numa_node(early_cpu_to_node(cpu)); | 1170 | set_numa_node(early_cpu_to_node(cpu)); |
1171 | #endif | 1171 | #endif |
1172 | 1172 | ||
1173 | me = current; | 1173 | me = current; |
1174 | 1174 | ||
1175 | if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) | 1175 | if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) |
1176 | panic("CPU#%d already initialized!\n", cpu); | 1176 | panic("CPU#%d already initialized!\n", cpu); |
1177 | 1177 | ||
1178 | pr_debug("Initializing CPU#%d\n", cpu); | 1178 | pr_debug("Initializing CPU#%d\n", cpu); |
1179 | 1179 | ||
1180 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | 1180 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
1181 | 1181 | ||
1182 | /* | 1182 | /* |
1183 | * Initialize the per-CPU GDT with the boot GDT, | 1183 | * Initialize the per-CPU GDT with the boot GDT, |
1184 | * and set up the GDT descriptor: | 1184 | * and set up the GDT descriptor: |
1185 | */ | 1185 | */ |
1186 | 1186 | ||
1187 | switch_to_new_gdt(cpu); | 1187 | switch_to_new_gdt(cpu); |
1188 | loadsegment(fs, 0); | 1188 | loadsegment(fs, 0); |
1189 | 1189 | ||
1190 | load_idt((const struct desc_ptr *)&idt_descr); | 1190 | load_idt((const struct desc_ptr *)&idt_descr); |
1191 | 1191 | ||
1192 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); | 1192 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); |
1193 | syscall_init(); | 1193 | syscall_init(); |
1194 | 1194 | ||
1195 | wrmsrl(MSR_FS_BASE, 0); | 1195 | wrmsrl(MSR_FS_BASE, 0); |
1196 | wrmsrl(MSR_KERNEL_GS_BASE, 0); | 1196 | wrmsrl(MSR_KERNEL_GS_BASE, 0); |
1197 | barrier(); | 1197 | barrier(); |
1198 | 1198 | ||
1199 | x86_configure_nx(); | 1199 | x86_configure_nx(); |
1200 | if (cpu != 0) | 1200 | if (cpu != 0) |
1201 | enable_x2apic(); | 1201 | enable_x2apic(); |
1202 | 1202 | ||
1203 | /* | 1203 | /* |
1204 | * set up and load the per-CPU TSS | 1204 | * set up and load the per-CPU TSS |
1205 | */ | 1205 | */ |
1206 | if (!oist->ist[0]) { | 1206 | if (!oist->ist[0]) { |
1207 | char *estacks = per_cpu(exception_stacks, cpu); | 1207 | char *estacks = per_cpu(exception_stacks, cpu); |
1208 | 1208 | ||
1209 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | 1209 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { |
1210 | estacks += exception_stack_sizes[v]; | 1210 | estacks += exception_stack_sizes[v]; |
1211 | oist->ist[v] = t->x86_tss.ist[v] = | 1211 | oist->ist[v] = t->x86_tss.ist[v] = |
1212 | (unsigned long)estacks; | 1212 | (unsigned long)estacks; |
1213 | } | 1213 | } |
1214 | } | 1214 | } |
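Note that estacks is advanced by each stack's size before it is recorded, so every oist->ist[v] / TSS IST slot ends up holding the top of its stack, which is what the hardware expects since stacks grow downward.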
1215 | 1215 | ||
1216 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | 1216 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); |
1217 | 1217 | ||
1218 | /* | 1218 | /* |
1219 | * <= is required because the CPU will access up to | 1219 | * <= is required because the CPU will access up to |
1220 | * 8 bits beyond the end of the IO permission bitmap. | 1220 | * 8 bits beyond the end of the IO permission bitmap. |
1221 | */ | 1221 | */ |
1222 | for (i = 0; i <= IO_BITMAP_LONGS; i++) | 1222 | for (i = 0; i <= IO_BITMAP_LONGS; i++) |
1223 | t->io_bitmap[i] = ~0UL; | 1223 | t->io_bitmap[i] = ~0UL; |
1224 | 1224 | ||
1225 | atomic_inc(&init_mm.mm_count); | 1225 | atomic_inc(&init_mm.mm_count); |
1226 | me->active_mm = &init_mm; | 1226 | me->active_mm = &init_mm; |
1227 | BUG_ON(me->mm); | 1227 | BUG_ON(me->mm); |
1228 | enter_lazy_tlb(&init_mm, me); | 1228 | enter_lazy_tlb(&init_mm, me); |
1229 | 1229 | ||
1230 | load_sp0(t, ¤t->thread); | 1230 | load_sp0(t, ¤t->thread); |
1231 | set_tss_desc(cpu, t); | 1231 | set_tss_desc(cpu, t); |
1232 | load_TR_desc(); | 1232 | load_TR_desc(); |
1233 | load_LDT(&init_mm.context); | 1233 | load_LDT(&init_mm.context); |
1234 | 1234 | ||
1235 | clear_all_debug_regs(); | 1235 | clear_all_debug_regs(); |
1236 | dbg_restore_debug_regs(); | 1236 | dbg_restore_debug_regs(); |
1237 | 1237 | ||
1238 | fpu_init(); | 1238 | fpu_init(); |
1239 | xsave_init(); | 1239 | xsave_init(); |
1240 | 1240 | ||
1241 | raw_local_save_flags(kernel_eflags); | 1241 | raw_local_save_flags(kernel_eflags); |
1242 | 1242 | ||
1243 | if (is_uv_system()) | 1243 | if (is_uv_system()) |
1244 | uv_cpu_init(); | 1244 | uv_cpu_init(); |
1245 | } | 1245 | } |
1246 | 1246 | ||
1247 | #else | 1247 | #else |
1248 | 1248 | ||
1249 | void __cpuinit cpu_init(void) | 1249 | void __cpuinit cpu_init(void) |
1250 | { | 1250 | { |
1251 | int cpu = smp_processor_id(); | 1251 | int cpu = smp_processor_id(); |
1252 | struct task_struct *curr = current; | 1252 | struct task_struct *curr = current; |
1253 | struct tss_struct *t = &per_cpu(init_tss, cpu); | 1253 | struct tss_struct *t = &per_cpu(init_tss, cpu); |
1254 | struct thread_struct *thread = &curr->thread; | 1254 | struct thread_struct *thread = &curr->thread; |
1255 | 1255 | ||
1256 | if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { | 1256 | if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { |
1257 | printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); | 1257 | printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); |
1258 | for (;;) | 1258 | for (;;) |
1259 | local_irq_enable(); | 1259 | local_irq_enable(); |
1260 | } | 1260 | } |
1261 | 1261 | ||
1262 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | 1262 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); |
1263 | 1263 | ||
1264 | if (cpu_has_vme || cpu_has_tsc || cpu_has_de) | 1264 | if (cpu_has_vme || cpu_has_tsc || cpu_has_de) |
1265 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | 1265 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
1266 | 1266 | ||
1267 | load_idt(&idt_descr); | 1267 | load_idt(&idt_descr); |
1268 | switch_to_new_gdt(cpu); | 1268 | switch_to_new_gdt(cpu); |
1269 | 1269 | ||
1270 | /* | 1270 | /* |
1271 | * Set up and load the per-CPU TSS and LDT | 1271 | * Set up and load the per-CPU TSS and LDT |
1272 | */ | 1272 | */ |
1273 | atomic_inc(&init_mm.mm_count); | 1273 | atomic_inc(&init_mm.mm_count); |
1274 | curr->active_mm = &init_mm; | 1274 | curr->active_mm = &init_mm; |
1275 | BUG_ON(curr->mm); | 1275 | BUG_ON(curr->mm); |
1276 | enter_lazy_tlb(&init_mm, curr); | 1276 | enter_lazy_tlb(&init_mm, curr); |
1277 | 1277 | ||
1278 | load_sp0(t, thread); | 1278 | load_sp0(t, thread); |
1279 | set_tss_desc(cpu, t); | 1279 | set_tss_desc(cpu, t); |
1280 | load_TR_desc(); | 1280 | load_TR_desc(); |
1281 | load_LDT(&init_mm.context); | 1281 | load_LDT(&init_mm.context); |
1282 | 1282 | ||
1283 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | 1283 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); |
1284 | 1284 | ||
1285 | #ifdef CONFIG_DOUBLEFAULT | 1285 | #ifdef CONFIG_DOUBLEFAULT |
1286 | /* Set up doublefault TSS pointer in the GDT */ | 1286 | /* Set up doublefault TSS pointer in the GDT */ |
1287 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); | 1287 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); |
1288 | #endif | 1288 | #endif |
1289 | 1289 | ||
1290 | clear_all_debug_regs(); | 1290 | clear_all_debug_regs(); |
1291 | dbg_restore_debug_regs(); | 1291 | dbg_restore_debug_regs(); |
1292 | 1292 | ||
1293 | fpu_init(); | 1293 | fpu_init(); |
1294 | xsave_init(); | 1294 | xsave_init(); |
1295 | } | 1295 | } |
1296 | #endif | 1296 | #endif |
1297 | 1297 |