Commit 5bb241b325d7d91bc4ec0b394f31dffb17fe7978

Authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Remove redundant non-NUMA topology functions
  x86: early_printk: Protect against using the same device twice
  x86: Reduce verbosity of "PAT enabled" kernel message
  x86: Reduce verbosity of "TSC is reliable" message
  x86: mce: Use safer ways to access MCE registers
  x86: mce, inject: Use real inject-msg in raise_local
  x86: mce: Fix thermal throttling message storm
  x86: mce: Clean up thermal throttling state tracking code
  x86: split NX setup into separate file to limit unstack-protected code
  xen: check EFER for NX before setting up GDT mapping
  x86: Cleanup linker script using new linker script macros.
  x86: Use section .data.page_aligned for the idt_table.
  x86: convert to use __HEAD and HEAD_TEXT macros.
  x86: convert compressed loader to use __HEAD and HEAD_TEXT macros.
  x86: fix fragile computation of vsyscall address

19 changed files:

arch/x86/boot/compressed/head_32.S
@@ -23,13 +23,14 @@
  */
 	.text
 
+#include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/page_types.h>
 #include <asm/boot.h>
 #include <asm/asm-offsets.h>
 
-	.section ".text.head","ax",@progbits
+	__HEAD
 ENTRY(startup_32)
 	cld
 	/*
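
Note: the __HEAD and HEAD_TEXT macros used throughout this series come from the generic headers; roughly, per include/linux/init.h and include/asm-generic/vmlinux.lds.h at this point in time:

	/* include/linux/init.h: assemble the code that follows into .head.text */
	#define __HEAD		.section	".head.text","ax"

	/* include/asm-generic/vmlinux.lds.h: collect that section in a linker script */
	#define HEAD_TEXT	*(.head.text)

This replaces the ad-hoc ".text.head" section names each architecture had been declaring by hand.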
arch/x86/boot/compressed/head_64.S
@@ -24,6 +24,7 @@
 	.code32
 	.text
 
+#include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/pgtable_types.h>
@@ -33,7 +34,7 @@
 #include <asm/processor-flags.h>
 #include <asm/asm-offsets.h>
 
-	.section ".text.head"
+	__HEAD
 	.code32
 ENTRY(startup_32)
 	cld
arch/x86/boot/compressed/vmlinux.lds.S
@@ -1,4 +1,6 @@
+#include <asm-generic/vmlinux.lds.h>
+
 OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
 
 #undef i386
 
@@ -18,9 +20,9 @@
  * address 0.
  */
 	. = 0;
-	.text.head : {
+	.head.text : {
 		_head = . ;
-		*(.text.head)
+		HEAD_TEXT
 		_ehead = . ;
 	}
 	.rodata.compressed : {
arch/x86/include/asm/pgtable_types.h
@@ -277,6 +277,7 @@
 typedef struct page *pgtable_t;
 
 extern pteval_t __supported_pte_mask;
+extern void set_nx(void);
 extern int nx_enabled;
 
 #define pgprot_writecombine pgprot_writecombine
arch/x86/include/asm/topology.h
@@ -165,19 +165,9 @@
 	return 0;
 }
 
-static inline int cpu_to_node(int cpu)
-{
-	return 0;
-}
-
 static inline int early_cpu_to_node(int cpu)
 {
 	return 0;
-}
-
-static inline const struct cpumask *cpumask_of_node(int node)
-{
-	return cpu_online_mask;
 }
 
 static inline void setup_node_to_cpumask_map(void) { }
arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -98,8 +98,9 @@
 };
 
 /* Inject mce on current CPU */
-static int raise_local(struct mce *m)
+static int raise_local(void)
 {
+	struct mce *m = &__get_cpu_var(injectm);
 	int context = MCJ_CTX(m->inject_flags);
 	int ret = 0;
 	int cpu = m->extcpu;
 
@@ -167,12 +168,12 @@
 			}
 			cpu_relax();
 		}
-		raise_local(m);
+		raise_local();
 		put_cpu();
 		put_online_cpus();
 	} else
 #endif
-		raise_local(m);
+		raise_local();
 }
 
 /* Error injection interface */
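
Note: raise_local() loses its argument because the record to inject already lives in per-CPU data. A minimal sketch of the access pattern (this_cpu_injectm is a hypothetical name; the file uses DEFINE_PER_CPU(struct mce, injectm)):

	/* Each CPU that reaches raise_local() resolves its *own* staged
	 * MCE record, so a remote CPU raises the error that was prepared
	 * for it rather than the initiating CPU's copy. */
	static struct mce *this_cpu_injectm(void)
	{
		return &__get_cpu_var(injectm);
	}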
arch/x86/kernel/cpu/mcheck/mce.c
@@ -305,13 +305,25 @@
 static u64 mce_rdmsrl(u32 msr)
 {
 	u64 v;
+
 	if (__get_cpu_var(injectm).finished) {
 		int offset = msr_to_offset(msr);
+
 		if (offset < 0)
 			return 0;
 		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
 	}
-	rdmsrl(msr, v);
+
+	if (rdmsrl_safe(msr, &v)) {
+		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
+		/*
+		 * Return zero in case the access faulted. This should
+		 * not happen normally but can happen if the CPU does
+		 * something weird, or if the code is buggy.
+		 */
+		v = 0;
+	}
+
 	return v;
 }
 
@@ -319,6 +331,7 @@
 {
 	if (__get_cpu_var(injectm).finished) {
 		int offset = msr_to_offset(msr);
+
 		if (offset >= 0)
 			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
 		return;
@@ -415,7 +428,7 @@
 		m->ip = mce_rdmsrl(rip_msr);
 }
 
-#ifdef CONFIG_X86_LOCAL_APIC 
+#ifdef CONFIG_X86_LOCAL_APIC
 /*
  * Called after interrupts have been reenabled again
  * when a MCE happened during an interrupts off region
@@ -1172,6 +1185,7 @@
 		return -ENOMEM;
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		b->ctl = -1ULL;
 		b->init = 1;
 	}
@@ -1203,6 +1217,7 @@
 	banks = b;
 	if (!mce_banks) {
 		int err = mce_banks_init();
+
 		if (err)
 			return err;
 	}
@@ -1237,6 +1252,7 @@
 
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (!b->init)
 			continue;
 		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
@@ -1626,6 +1642,7 @@
 
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
 	}
@@ -1911,6 +1928,7 @@
 		cmci_clear();
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
 	}
@@ -1928,6 +1946,7 @@
 		cmci_reenable();
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
 	}
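
Note: the mce_rdmsrl() hunk above swaps rdmsrl() for its exception-safe variant. A minimal sketch of the pattern (read_msr_or_zero is a hypothetical name):

	/* rdmsrl_safe() returns 0 on success and non-zero if the RDMSR
	 * faulted (e.g. the MSR is not implemented on this CPU), instead
	 * of letting the unhandled fault take the machine down. */
	static u64 read_msr_or_zero(u32 msr)
	{
		u64 v;

		if (rdmsrl_safe(msr, &v))
			v = 0;	/* access faulted: report 0, keep running */
		return v;
	}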
arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -34,20 +34,31 @@
 /* How long to wait between reporting thermal events */
 #define CHECK_INTERVAL		(300 * HZ)
 
-static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
-static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
-static DEFINE_PER_CPU(bool, thermal_throttle_active);
+/*
+ * Current thermal throttling state:
+ */
+struct thermal_state {
+	bool			is_throttled;
 
-static atomic_t therm_throt_en = ATOMIC_INIT(0);
+	u64			next_check;
+	unsigned long		throttle_count;
+	unsigned long		last_throttle_count;
+};
 
+static DEFINE_PER_CPU(struct thermal_state, thermal_state);
+
+static atomic_t therm_throt_en = ATOMIC_INIT(0);
+
 #ifdef CONFIG_SYSFS
 #define define_therm_throt_sysdev_one_ro(_name) \
 static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
 
 #define define_therm_throt_sysdev_show_func(name)			\
-static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev,	\
-					      struct sysdev_attribute *attr, \
-					      char *buf)		\
+									\
+static ssize_t therm_throt_sysdev_show_##name(				\
+			struct sys_device *dev,				\
+			struct sysdev_attribute *attr,			\
+			char *buf)					\
 {									\
 	unsigned int cpu = dev->id;					\
 	ssize_t ret;							\
@@ -55,7 +66,7 @@
 	preempt_disable();	/* CPU hotplug */			\
 	if (cpu_online(cpu))						\
 		ret = sprintf(buf, "%lu\n",				\
-			per_cpu(thermal_throttle_##name, cpu));		\
+			per_cpu(thermal_state, cpu).name);		\
 	else								\
 		ret = 0;						\
 	preempt_enable();						\
 
@@ -63,11 +74,11 @@
 	return ret;							\
 }
 
-define_therm_throt_sysdev_show_func(count);
-define_therm_throt_sysdev_one_ro(count);
+define_therm_throt_sysdev_show_func(throttle_count);
+define_therm_throt_sysdev_one_ro(throttle_count);
 
 static struct attribute *thermal_throttle_attrs[] = {
-	&attr_count.attr,
+	&attr_throttle_count.attr,
 	NULL
 };
 
@@ -93,33 +104,39 @@
  * 1 : Event should be logged further, and a message has been
  *     printed to the syslog.
 */
-static int therm_throt_process(int curr)
+static int therm_throt_process(bool is_throttled)
 {
-	unsigned int cpu = smp_processor_id();
-	__u64 tmp_jiffs = get_jiffies_64();
-	bool was_throttled = __get_cpu_var(thermal_throttle_active);
-	bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
+	struct thermal_state *state;
+	unsigned int this_cpu;
+	bool was_throttled;
+	u64 now;
 
+	this_cpu = smp_processor_id();
+	now = get_jiffies_64();
+	state = &per_cpu(thermal_state, this_cpu);
+
+	was_throttled = state->is_throttled;
+	state->is_throttled = is_throttled;
+
 	if (is_throttled)
-		__get_cpu_var(thermal_throttle_count)++;
+		state->throttle_count++;
 
-	if (!(was_throttled ^ is_throttled) &&
-	    time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+	if (time_before64(now, state->next_check) &&
+	    state->throttle_count != state->last_throttle_count)
 		return 0;
 
-	__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
+	state->next_check = now + CHECK_INTERVAL;
+	state->last_throttle_count = state->throttle_count;
 
 	/* if we just entered the thermal event */
 	if (is_throttled) {
-		printk(KERN_CRIT "CPU%d: Temperature above threshold, "
-		       "cpu clock throttled (total events = %lu)\n",
-		       cpu, __get_cpu_var(thermal_throttle_count));
+		printk(KERN_CRIT "CPU%d: Temperature above threshold, cpu clock throttled (total events = %lu)\n", this_cpu, state->throttle_count);
 
 		add_taint(TAINT_MACHINE_CHECK);
 		return 1;
 	}
 	if (was_throttled) {
-		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
+		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", this_cpu);
 		return 1;
 	}
 
@@ -213,7 +230,7 @@
 	__u64 msr_val;
 
 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
-	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT))
+	if (therm_throt_process((msr_val & THERM_STATUS_PROCHOT) != 0))
 		mce_log_therm_throt_event(msr_val);
 }
 
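
Note: this is what stops the message storm. The old code logged on every flip of the throttle bit; the new per-CPU state rate-limits the output. A simplified restatement of the check (should_report is a hypothetical name, definitions as in the hunks above):

	/* Stay quiet while we are inside the current CHECK_INTERVAL window
	 * (300*HZ jiffies, i.e. five minutes) *and* throttle events are
	 * still streaming in; otherwise allow one message, remember the
	 * count, and open a new window. */
	static bool should_report(struct thermal_state *state, u64 now)
	{
		if (time_before64(now, state->next_check) &&
		    state->throttle_count != state->last_throttle_count)
			return false;	/* rate-limited */

		state->next_check = now + CHECK_INTERVAL;
		state->last_throttle_count = state->throttle_count;
		return true;
	}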
arch/x86/kernel/early_printk.c
@@ -178,6 +178,11 @@
 
 static inline void early_console_register(struct console *con, int keep_early)
 {
+	if (early_console->index != -1) {
+		printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
+		       con->name);
+		return;
+	}
 	early_console = con;
 	if (keep_early)
 		early_console->flags &= ~CON_BOOT;
arch/x86/kernel/head_32.S
@@ -79,7 +79,7 @@
  * any particular GDT layout, because we load our own as soon as we
  * can.
  */
-.section .text.head,"ax",@progbits
+__HEAD
 ENTRY(startup_32)
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	   us to not reload segments */
arch/x86/kernel/head_64.S
@@ -40,7 +40,7 @@
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
 
 	.text
-	.section .text.head
+	__HEAD
 	.code64
 	.globl startup_64
 startup_64:
arch/x86/kernel/traps.c
@@ -72,11 +72,9 @@
 
 /*
  * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.. We have a special link segment
- * for this.
+ * F0 0F bug workaround.
  */
-gate_desc idt_table[NR_VECTORS]
-	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
 #endif
 
 DECLARE_BITMAP(used_vectors, NR_VECTORS);
arch/x86/kernel/tsc_sync.c
@@ -114,7 +114,7 @@
 		return;
 
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
-		pr_info("Skipping synchronization checks as TSC is reliable.\n");
+		printk_once(KERN_INFO "Skipping synchronization checks as TSC is reliable.\n");
 		return;
 	}
 
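
Note: printk_once() prints only on the first invocation, so the message now appears once per boot instead of once for every CPU brought online. Roughly the idea (a sketch, not the exact kernel macro):

	#define printk_once_sketch(x...)	\
	({					\
		static bool __done;		\
						\
		if (!__done) {			\
			__done = true;		\
			printk(x);		\
		}				\
	})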
arch/x86/kernel/vmlinux.lds.S
@@ -65,17 +65,11 @@
 #endif
 
 	/* Text and read-only data */
-
-	/* bootstrapping code */
-	.text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
-		_text = .;
-		*(.text.head)
-	} :text = 0x9090
-
-	/* The rest of the text */
 	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
+		_text = .;
+		/* bootstrapping code */
+		HEAD_TEXT
 #ifdef CONFIG_X86_32
-		/* not really needed, already page aligned */
 		. = ALIGN(PAGE_SIZE);
 		*(.text.page_aligned)
 #endif
@@ -94,13 +88,7 @@
 
 	NOTES :text :note
 
-	/* Exception table */
-	. = ALIGN(16);
-	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
-		__start___ex_table = .;
-		*(__ex_table)
-		__stop___ex_table = .;
-	} :text = 0x9090
+	EXCEPTION_TABLE(16) :text = 0x9090
 
 	RO_DATA(PAGE_SIZE)
 
@@ -118,7 +106,6 @@
 #endif
 
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
-		*(.data.idt)
 
 		CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)
 
@@ -135,24 +122,21 @@
 #ifdef CONFIG_X86_64
 
 #define VSYSCALL_ADDR (-10*1024*1024)
-#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
-			PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
-#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
-			PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
 
-#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
+#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
 #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
 
-#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
+#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
 #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
 
+	. = ALIGN(4096);
+	__vsyscall_0 = .;
+
 	. = VSYSCALL_ADDR;
-	.vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
+	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
 		*(.vsyscall_0)
 	} :user
 
-	__vsyscall_0 = VSYSCALL_VIRT_ADDR;
-
 	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
 	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
 		*(.vsyscall_fn)
@@ -192,11 +176,9 @@
 		*(.vsyscall_3)
 	}
 
-	. = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
+	. = __vsyscall_0 + PAGE_SIZE;
 
 #undef VSYSCALL_ADDR
-#undef VSYSCALL_PHYS_ADDR
-#undef VSYSCALL_VIRT_ADDR
 #undef VLOAD_OFFSET
 #undef VLOAD
 #undef VVIRT_OFFSET
@@ -219,45 +201,19 @@
 	PERCPU_VADDR(0, :percpu)
 #endif
 
-	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
-		_sinittext = .;
-		INIT_TEXT
-		_einittext = .;
-	}
+	INIT_TEXT_SECTION(PAGE_SIZE)
 #ifdef CONFIG_X86_64
 	:init
 #endif
 
-	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
-		INIT_DATA
-	}
+	INIT_DATA_SECTION(16)
 
-	. = ALIGN(16);
-	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
-		__setup_start = .;
-		*(.init.setup)
-		__setup_end = .;
-	}
-	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
-		__initcall_start = .;
-		INITCALLS
-		__initcall_end = .;
-	}
-
-	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
-		__con_initcall_start = .;
-		*(.con_initcall.init)
-		__con_initcall_end = .;
-	}
-
 	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
 		__x86_cpu_dev_start = .;
 		*(.x86_cpu_dev.init)
 		__x86_cpu_dev_end = .;
 	}
 
-	SECURITY_INIT
-
 	. = ALIGN(8);
 	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
 		__parainstructions = .;
@@ -287,15 +243,6 @@
 	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
 		EXIT_DATA
 	}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	. = ALIGN(PAGE_SIZE);
-	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
-		__initramfs_start = .;
-		*(.init.ramfs)
-		__initramfs_end = .;
-	}
-#endif
 
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
 	PERCPU(PAGE_SIZE)
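
Note on the vsyscall change: the load and virtual offsets are now anchored to a symbol emitted at the current output position rather than computed from .data. A sketch of the arithmetic, under the definitions in the hunks above:

	/*
	 *   VVIRT(x) = ADDR(x) - (VSYSCALL_ADDR - __vsyscall_0)
	 *   VLOAD(x) = ADDR(x) - (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
	 *
	 * The vsyscall page is linked at the fixed VSYSCALL_ADDR but stored
	 * in the image at __vsyscall_0, the page-aligned spot right after
	 * the preceding sections. The old VSYSCALL_PHYS_ADDR/VIRT_ADDR
	 * macros derived that spot from LOADADDR(.data) + SIZEOF(.data),
	 * which silently broke whenever the section layout changed.
	 */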
arch/x86/mm/Makefile
@@ -1,9 +1,10 @@
 obj-y	:=	init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-		pat.o pgtable.o physaddr.o gup.o
+		pat.o pgtable.o physaddr.o gup.o setup_nx.o
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_physaddr.o		:= $(nostackp)
+CFLAGS_setup_nx.o		:= $(nostackp)
 
 obj-$(CONFIG_SMP)		+= tlb.o
 
arch/x86/mm/init.c
@@ -28,69 +28,6 @@
 #endif
 	;
 
-int nx_enabled;
-
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-static int disable_nx __cpuinitdata;
-
-/*
- * noexec = on|off
- *
- * Control non-executable mappings for processes.
- *
- * on      Enable
- * off     Disable
- */
-static int __init noexec_setup(char *str)
-{
-	if (!str)
-		return -EINVAL;
-	if (!strncmp(str, "on", 2)) {
-		__supported_pte_mask |= _PAGE_NX;
-		disable_nx = 0;
-	} else if (!strncmp(str, "off", 3)) {
-		disable_nx = 1;
-		__supported_pte_mask &= ~_PAGE_NX;
-	}
-	return 0;
-}
-early_param("noexec", noexec_setup);
-#endif
-
-#ifdef CONFIG_X86_PAE
-static void __init set_nx(void)
-{
-	unsigned int v[4], l, h;
-
-	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
-		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
-
-		if ((v[3] & (1 << 20)) && !disable_nx) {
-			rdmsr(MSR_EFER, l, h);
-			l |= EFER_NX;
-			wrmsr(MSR_EFER, l, h);
-			nx_enabled = 1;
-			__supported_pte_mask |= _PAGE_NX;
-		}
-	}
-}
-#else
-static inline void set_nx(void)
-{
-}
-#endif
-
-#ifdef CONFIG_X86_64
-void __cpuinit check_efer(void)
-{
-	unsigned long efer;
-
-	rdmsrl(MSR_EFER, efer);
-	if (!(efer & EFER_NX) || disable_nx)
-		__supported_pte_mask &= ~_PAGE_NX;
-}
-#endif
-
 static void __init find_early_table_space(unsigned long end, int use_pse,
 					  int use_gbpages)
 {
arch/x86/mm/pat.c
@@ -81,6 +81,7 @@
 void pat_init(void)
 {
 	u64 pat;
+	bool boot_cpu = !boot_pat_state;
 
 	if (!pat_enabled)
 		return;
@@ -122,8 +123,10 @@
 		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
-	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
-	       smp_processor_id(), boot_pat_state, pat);
+
+	if (boot_cpu)
+		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
+		       smp_processor_id(), boot_pat_state, pat);
 }
 
 #undef PAT
arch/x86/mm/setup_nx.c
@@ -0,0 +1,68 @@
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <asm/pgtable.h>
+
+int nx_enabled;
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+static int disable_nx __cpuinitdata;
+
+/*
+ * noexec = on|off
+ *
+ * Control non-executable mappings for processes.
+ *
+ * on      Enable
+ * off     Disable
+ */
+static int __init noexec_setup(char *str)
+{
+	if (!str)
+		return -EINVAL;
+	if (!strncmp(str, "on", 2)) {
+		__supported_pte_mask |= _PAGE_NX;
+		disable_nx = 0;
+	} else if (!strncmp(str, "off", 3)) {
+		disable_nx = 1;
+		__supported_pte_mask &= ~_PAGE_NX;
+	}
+	return 0;
+}
+early_param("noexec", noexec_setup);
+#endif
+
+#ifdef CONFIG_X86_PAE
+void __init set_nx(void)
+{
+	unsigned int v[4], l, h;
+
+	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
+		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
+
+		if ((v[3] & (1 << 20)) && !disable_nx) {
+			rdmsr(MSR_EFER, l, h);
+			l |= EFER_NX;
+			wrmsr(MSR_EFER, l, h);
+			nx_enabled = 1;
+			__supported_pte_mask |= _PAGE_NX;
+		}
+	}
+}
+#else
+void set_nx(void)
+{
+}
+#endif
+
+#ifdef CONFIG_X86_64
+void __cpuinit check_efer(void)
+{
+	unsigned long efer;
+
+	rdmsrl(MSR_EFER, efer);
+	if (!(efer & EFER_NX) || disable_nx)
+		__supported_pte_mask &= ~_PAGE_NX;
+}
+#endif
arch/x86/xen/enlighten.c
@@ -1082,6 +1082,11 @@
 
 	__supported_pte_mask |= _PAGE_IOMAP;
 
+#ifdef CONFIG_X86_64
+	/* Work out if we support NX */
+	check_efer();
+#endif
+
 	xen_setup_features();
 
 	/* Get mfn list */
@@ -1122,11 +1127,6 @@
 	xen_smp_init();
 
 	pgd = (pgd_t *)xen_start_info->pt_base;
-
-#ifdef CONFIG_X86_64
-	/* Work out if we support NX */
-	check_efer();
-#endif
 
 	/* Don't do the full vcpu_info placement stuff until we have a
 	   possible map and a non-dummy shared_info. */