Commit 15385dfe7e0fa6866b204dd0d14aec2cc48fc0a7

Authored by Linus Torvalds

Merge branch 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86/smap support from Ingo Molnar:
 "This adds support for the SMAP (Supervisor Mode Access Prevention) CPU
  feature on Intel CPUs: a hardware feature that prevents unintended
  user-space data access from kernel privileged code.

  It's turned on automatically when possible.

  This, in combination with SMEP, makes it even harder to exploit kernel
  bugs such as NULL pointer dereferences."

Fix up trivial conflict in arch/x86/kernel/entry_64.S due to newly added
includes right next to each other.

* 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, smep, smap: Make the switching functions one-way
  x86, suspend: On wakeup always initialize cr4 and EFER
  x86-32: Start out eflags and cr4 clean
  x86, smap: Do not abuse the [f][x]rstor_checking() functions for user space
  x86-32, smap: Add STAC/CLAC instructions to 32-bit kernel entry
  x86, smap: Reduce the SMAP overhead for signal handling
  x86, smap: A page fault due to SMAP is an oops
  x86, smap: Turn on Supervisor Mode Access Prevention
  x86, smap: Add STAC and CLAC instructions to control user space access
  x86, uaccess: Merge prototypes for clear_user/__clear_user
  x86, smap: Add a header file with macros for STAC/CLAC
  x86, alternative: Add header guards to <asm/alternative-asm.h>
  x86, alternative: Use .pushsection/.popsection
  x86, smap: Add CR4 bit for SMAP
  x86-32, mm: The WP test should be done on a kernel page
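
For a sense of what the series buys in practice, here is an illustrative sketch
(not code from this series; the struct and helper names are made up) of the
difference SMAP makes for kernel code that touches user memory:

    /* Buggy: a direct dereference of a user-supplied (possibly NULL)
     * pointer from kernel mode now page-faults with CR4.SMAP set, and the
     * fault-handler hunk in arch/x86/mm/fault.c below treats it as an oops
     * instead of quietly reading attacker-mapped data. */
    struct user_req { int fd; };

    static int handle_req_buggy(struct user_req __user *ureq)
    {
            return ureq->fd;        /* raw __user dereference */
    }

    /* Correct: the sanctioned accessors keep working, because the series
     * brackets the actual access with STAC/CLAC (EFLAGS.AC set around the
     * user access, cleared again afterwards). */
    static int handle_req(struct user_req __user *ureq)
    {
            struct user_req req;

            if (copy_from_user(&req, ureq, sizeof(req)))
                    return -EFAULT;
            return req.fd;
    }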

Showing 31 changed files

Documentation/kernel-parameters.txt
... ... @@ -1812,8 +1812,12 @@
1812 1812 noexec=on: enable non-executable mappings (default)
1813 1813 noexec=off: disable non-executable mappings
1814 1814  
  1815 + nosmap [X86]
  1816 + Disable SMAP (Supervisor Mode Access Prevention)
  1817 + even if it is supported by processor.
  1818 +
1815 1819 nosmep [X86]
1816   - Disable SMEP (Supervisor Mode Execution Protection)
  1820 + Disable SMEP (Supervisor Mode Execution Prevention)
1817 1821 even if it is supported by processor.
1818 1822  
1819 1823 noexec32 [X86-64]
arch/x86/Kconfig
... ... @@ -1493,6 +1493,17 @@
1493 1493 If supported, this is a high bandwidth, cryptographically
1494 1494 secure hardware random number generator.
1495 1495  
  1496 +config X86_SMAP
  1497 + def_bool y
  1498 + prompt "Supervisor Mode Access Prevention" if EXPERT
  1499 + ---help---
  1500 + Supervisor Mode Access Prevention (SMAP) is a security
  1501 + feature in newer Intel processors. There is a small
  1502 + performance cost if this enabled and turned on; there is
  1503 + also a small increase in the kernel size if this is enabled.
  1504 +
  1505 + If unsure, say Y.
  1506 +
1496 1507 config EFI
1497 1508 bool "EFI runtime service support"
1498 1509 depends on ACPI
arch/x86/ia32/ia32_signal.c
... ... @@ -32,6 +32,7 @@
32 32 #include <asm/sigframe.h>
33 33 #include <asm/sighandling.h>
34 34 #include <asm/sys_ia32.h>
  35 +#include <asm/smap.h>
35 36  
36 37 #define FIX_EFLAGS __FIX_EFLAGS
37 38  
38 39  
... ... @@ -251,11 +252,12 @@
251 252  
252 253 get_user_ex(tmp, &sc->fpstate);
253 254 buf = compat_ptr(tmp);
254   - err |= restore_xstate_sig(buf, 1);
255 255  
256 256 get_user_ex(*pax, &sc->ax);
257 257 } get_user_catch(err);
258 258  
  259 + err |= restore_xstate_sig(buf, 1);
  260 +
259 261 return err;
260 262 }
261 263  
... ... @@ -506,7 +508,6 @@
506 508 put_user_ex(sig, &frame->sig);
507 509 put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
508 510 put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
509   - err |= copy_siginfo_to_user32(&frame->info, info);
510 511  
511 512 /* Create the ucontext. */
512 513 if (cpu_has_xsave)
... ... @@ -518,9 +519,6 @@
518 519 put_user_ex(sas_ss_flags(regs->sp),
519 520 &frame->uc.uc_stack.ss_flags);
520 521 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
521   - err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
522   - regs, set->sig[0]);
523   - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
524 522  
525 523 if (ka->sa.sa_flags & SA_RESTORER)
526 524 restorer = ka->sa.sa_restorer;
... ... @@ -535,6 +533,11 @@
535 533 */
536 534 put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
537 535 } put_user_catch(err);
  536 +
  537 + err |= copy_siginfo_to_user32(&frame->info, info);
  538 + err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
  539 + regs, set->sig[0]);
  540 + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
538 541  
539 542 if (err)
540 543 return -EFAULT;
arch/x86/ia32/ia32entry.S
... ... @@ -14,6 +14,7 @@
14 14 #include <asm/segment.h>
15 15 #include <asm/irqflags.h>
16 16 #include <asm/asm.h>
  17 +#include <asm/smap.h>
17 18 #include <linux/linkage.h>
18 19 #include <linux/err.h>
19 20  
20 21  
... ... @@ -146,8 +147,10 @@
146 147 SAVE_ARGS 0,1,0
147 148 /* no need to do an access_ok check here because rbp has been
148 149 32bit zero extended */
  150 + ASM_STAC
149 151 1: movl (%rbp),%ebp
150 152 _ASM_EXTABLE(1b,ia32_badarg)
  153 + ASM_CLAC
151 154 orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
152 155 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
153 156 CFI_REMEMBER_STATE
154 157  
... ... @@ -301,8 +304,10 @@
301 304 /* no need to do an access_ok check here because r8 has been
302 305 32bit zero extended */
303 306 /* hardware stack frame is complete now */
  307 + ASM_STAC
304 308 1: movl (%r8),%r9d
305 309 _ASM_EXTABLE(1b,ia32_badarg)
  310 + ASM_CLAC
306 311 orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
307 312 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
308 313 CFI_REMEMBER_STATE
... ... @@ -365,6 +370,7 @@
365 370 END(ia32_cstar_target)
366 371  
367 372 ia32_badarg:
  373 + ASM_CLAC
368 374 movq $-EFAULT,%rax
369 375 jmp ia32_sysret
370 376 CFI_ENDPROC
arch/x86/include/asm/alternative-asm.h
  1 +#ifndef _ASM_X86_ALTERNATIVE_ASM_H
  2 +#define _ASM_X86_ALTERNATIVE_ASM_H
  3 +
1 4 #ifdef __ASSEMBLY__
2 5  
3 6 #include <asm/asm.h>
4 7  
... ... @@ -5,10 +8,10 @@
5 8 #ifdef CONFIG_SMP
6 9 .macro LOCK_PREFIX
7 10 672: lock
8   - .section .smp_locks,"a"
  11 + .pushsection .smp_locks,"a"
9 12 .balign 4
10 13 .long 672b - .
11   - .previous
  14 + .popsection
12 15 .endm
13 16 #else
14 17 .macro LOCK_PREFIX
... ... @@ -24,4 +27,6 @@
24 27 .endm
25 28  
26 29 #endif /* __ASSEMBLY__ */
  30 +
  31 +#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
arch/x86/include/asm/alternative.h
... ... @@ -29,10 +29,10 @@
29 29  
30 30 #ifdef CONFIG_SMP
31 31 #define LOCK_PREFIX_HERE \
32   - ".section .smp_locks,\"a\"\n" \
33   - ".balign 4\n" \
34   - ".long 671f - .\n" /* offset */ \
35   - ".previous\n" \
  32 + ".pushsection .smp_locks,\"a\"\n" \
  33 + ".balign 4\n" \
  34 + ".long 671f - .\n" /* offset */ \
  35 + ".popsection\n" \
36 36 "671:"
37 37  
38 38 #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
39 39  
40 40  
41 41  
42 42  
43 43  
44 44  
45 45  
... ... @@ -99,30 +99,30 @@
99 99 /* alternative assembly primitive: */
100 100 #define ALTERNATIVE(oldinstr, newinstr, feature) \
101 101 OLDINSTR(oldinstr) \
102   - ".section .altinstructions,\"a\"\n" \
  102 + ".pushsection .altinstructions,\"a\"\n" \
103 103 ALTINSTR_ENTRY(feature, 1) \
104   - ".previous\n" \
105   - ".section .discard,\"aw\",@progbits\n" \
  104 + ".popsection\n" \
  105 + ".pushsection .discard,\"aw\",@progbits\n" \
106 106 DISCARD_ENTRY(1) \
107   - ".previous\n" \
108   - ".section .altinstr_replacement, \"ax\"\n" \
  107 + ".popsection\n" \
  108 + ".pushsection .altinstr_replacement, \"ax\"\n" \
109 109 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
110   - ".previous"
  110 + ".popsection"
111 111  
112 112 #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
113 113 OLDINSTR(oldinstr) \
114   - ".section .altinstructions,\"a\"\n" \
  114 + ".pushsection .altinstructions,\"a\"\n" \
115 115 ALTINSTR_ENTRY(feature1, 1) \
116 116 ALTINSTR_ENTRY(feature2, 2) \
117   - ".previous\n" \
118   - ".section .discard,\"aw\",@progbits\n" \
  117 + ".popsection\n" \
  118 + ".pushsection .discard,\"aw\",@progbits\n" \
119 119 DISCARD_ENTRY(1) \
120 120 DISCARD_ENTRY(2) \
121   - ".previous\n" \
122   - ".section .altinstr_replacement, \"ax\"\n" \
  121 + ".popsection\n" \
  122 + ".pushsection .altinstr_replacement, \"ax\"\n" \
123 123 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
124 124 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
125   - ".previous"
  125 + ".popsection"
126 126  
127 127 /*
128 128 * This must be included *after* the definition of ALTERNATIVE due to
arch/x86/include/asm/fpu-internal.h
... ... @@ -21,6 +21,7 @@
21 21 #include <asm/user.h>
22 22 #include <asm/uaccess.h>
23 23 #include <asm/xsave.h>
  24 +#include <asm/smap.h>
24 25  
25 26 #ifdef CONFIG_X86_64
26 27 # include <asm/sigcontext32.h>
... ... @@ -121,6 +122,22 @@
121 122 __sanitize_i387_state(tsk);
122 123 }
123 124  
  125 +#define user_insn(insn, output, input...) \
  126 +({ \
  127 + int err; \
  128 + asm volatile(ASM_STAC "\n" \
  129 + "1:" #insn "\n\t" \
  130 + "2: " ASM_CLAC "\n" \
  131 + ".section .fixup,\"ax\"\n" \
  132 + "3: movl $-1,%[err]\n" \
  133 + " jmp 2b\n" \
  134 + ".previous\n" \
  135 + _ASM_EXTABLE(1b, 3b) \
  136 + : [err] "=r" (err), output \
  137 + : "0"(0), input); \
  138 + err; \
  139 +})
  140 +
124 141 #define check_insn(insn, output, input...) \
125 142 ({ \
126 143 int err; \
127 144  
128 145  
129 146  
... ... @@ -138,18 +155,18 @@
138 155  
139 156 static inline int fsave_user(struct i387_fsave_struct __user *fx)
140 157 {
141   - return check_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
  158 + return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
142 159 }
143 160  
144 161 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
145 162 {
146 163 if (config_enabled(CONFIG_X86_32))
147   - return check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
  164 + return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
148 165 else if (config_enabled(CONFIG_AS_FXSAVEQ))
149   - return check_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
  166 + return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
150 167  
151 168 /* See comment in fpu_fxsave() below. */
152   - return check_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
  169 + return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
153 170 }
154 171  
155 172 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
156 173  
... ... @@ -164,9 +181,26 @@
164 181 "m" (*fx));
165 182 }
166 183  
  184 +static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
  185 +{
  186 + if (config_enabled(CONFIG_X86_32))
  187 + return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
  188 + else if (config_enabled(CONFIG_AS_FXSAVEQ))
  189 + return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
  190 +
  191 + /* See comment in fpu_fxsave() below. */
  192 + return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
  193 + "m" (*fx));
  194 +}
  195 +
167 196 static inline int frstor_checking(struct i387_fsave_struct *fx)
168 197 {
169 198 return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
  199 +}
  200 +
  201 +static inline int frstor_user(struct i387_fsave_struct __user *fx)
  202 +{
  203 + return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
170 204 }
171 205  
172 206 static inline void fpu_fxsave(struct fpu *fpu)
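
The user_insn() wrapper added above mirrors check_insn() but brackets the
instruction with STAC/CLAC, which is what makes the new *_user() helpers safe
to point at __user buffers; the xsave.c hunk further down switches the
signal-restore path over to them for the same reason. A caller-side sketch
(the helper name is made up):

    static int restore_fx_state(struct i387_fxsave_struct *kbuf,
                                struct i387_fxsave_struct __user *ubuf)
    {
            if (ubuf)
                    return fxrstor_user(ubuf);      /* user memory: STAC ... CLAC */
            return fxrstor_checking(kbuf);          /* kernel memory: no AC toggling */
    }
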
arch/x86/include/asm/futex.h
... ... @@ -9,10 +9,13 @@
9 9 #include <asm/asm.h>
10 10 #include <asm/errno.h>
11 11 #include <asm/processor.h>
  12 +#include <asm/smap.h>
12 13  
13 14 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
14   - asm volatile("1:\t" insn "\n" \
15   - "2:\t.section .fixup,\"ax\"\n" \
  15 + asm volatile("\t" ASM_STAC "\n" \
  16 + "1:\t" insn "\n" \
  17 + "2:\t" ASM_CLAC "\n" \
  18 + "\t.section .fixup,\"ax\"\n" \
16 19 "3:\tmov\t%3, %1\n" \
17 20 "\tjmp\t2b\n" \
18 21 "\t.previous\n" \
19 22  
... ... @@ -21,12 +24,14 @@
21 24 : "i" (-EFAULT), "0" (oparg), "1" (0))
22 25  
23 26 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
24   - asm volatile("1:\tmovl %2, %0\n" \
  27 + asm volatile("\t" ASM_STAC "\n" \
  28 + "1:\tmovl %2, %0\n" \
25 29 "\tmovl\t%0, %3\n" \
26 30 "\t" insn "\n" \
27 31 "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
28 32 "\tjnz\t1b\n" \
29   - "3:\t.section .fixup,\"ax\"\n" \
  33 + "3:\t" ASM_CLAC "\n" \
  34 + "\t.section .fixup,\"ax\"\n" \
30 35 "4:\tmov\t%5, %1\n" \
31 36 "\tjmp\t3b\n" \
32 37 "\t.previous\n" \
... ... @@ -122,8 +127,10 @@
122 127 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
123 128 return -EFAULT;
124 129  
125   - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
126   - "2:\t.section .fixup, \"ax\"\n"
  130 + asm volatile("\t" ASM_STAC "\n"
  131 + "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
  132 + "2:\t" ASM_CLAC "\n"
  133 + "\t.section .fixup, \"ax\"\n"
127 134 "3:\tmov %3, %0\n"
128 135 "\tjmp 2b\n"
129 136 "\t.previous\n"
arch/x86/include/asm/processor-flags.h
... ... @@ -65,6 +65,7 @@
65 65 #define X86_CR4_PCIDE 0x00020000 /* enable PCID support */
66 66 #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
67 67 #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
  68 +#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
68 69  
69 70 /*
70 71 * x86-64 Task Priority Register, CR8
arch/x86/include/asm/smap.h
  1 +/*
  2 + * Supervisor Mode Access Prevention support
  3 + *
  4 + * Copyright (C) 2012 Intel Corporation
  5 + * Author: H. Peter Anvin <hpa@linux.intel.com>
  6 + *
  7 + * This program is free software; you can redistribute it and/or
  8 + * modify it under the terms of the GNU General Public License
  9 + * as published by the Free Software Foundation; version 2
  10 + * of the License.
  11 + */
  12 +
  13 +#ifndef _ASM_X86_SMAP_H
  14 +#define _ASM_X86_SMAP_H
  15 +
  16 +#include <linux/stringify.h>
  17 +#include <asm/nops.h>
  18 +#include <asm/cpufeature.h>
  19 +
  20 +/* "Raw" instruction opcodes */
  21 +#define __ASM_CLAC .byte 0x0f,0x01,0xca
  22 +#define __ASM_STAC .byte 0x0f,0x01,0xcb
  23 +
  24 +#ifdef __ASSEMBLY__
  25 +
  26 +#include <asm/alternative-asm.h>
  27 +
  28 +#ifdef CONFIG_X86_SMAP
  29 +
  30 +#define ASM_CLAC \
  31 + 661: ASM_NOP3 ; \
  32 + .pushsection .altinstr_replacement, "ax" ; \
  33 + 662: __ASM_CLAC ; \
  34 + .popsection ; \
  35 + .pushsection .altinstructions, "a" ; \
  36 + altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
  37 + .popsection
  38 +
  39 +#define ASM_STAC \
  40 + 661: ASM_NOP3 ; \
  41 + .pushsection .altinstr_replacement, "ax" ; \
  42 + 662: __ASM_STAC ; \
  43 + .popsection ; \
  44 + .pushsection .altinstructions, "a" ; \
  45 + altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
  46 + .popsection
  47 +
  48 +#else /* CONFIG_X86_SMAP */
  49 +
  50 +#define ASM_CLAC
  51 +#define ASM_STAC
  52 +
  53 +#endif /* CONFIG_X86_SMAP */
  54 +
  55 +#else /* __ASSEMBLY__ */
  56 +
  57 +#include <asm/alternative.h>
  58 +
  59 +#ifdef CONFIG_X86_SMAP
  60 +
  61 +static __always_inline void clac(void)
  62 +{
  63 + /* Note: a barrier is implicit in alternative() */
  64 + alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
  65 +}
  66 +
  67 +static __always_inline void stac(void)
  68 +{
  69 + /* Note: a barrier is implicit in alternative() */
  70 + alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
  71 +}
  72 +
  73 +/* These macros can be used in asm() statements */
  74 +#define ASM_CLAC \
  75 + ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
  76 +#define ASM_STAC \
  77 + ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP)
  78 +
  79 +#else /* CONFIG_X86_SMAP */
  80 +
  81 +static inline void clac(void) { }
  82 +static inline void stac(void) { }
  83 +
  84 +#define ASM_CLAC
  85 +#define ASM_STAC
  86 +
  87 +#endif /* CONFIG_X86_SMAP */
  88 +
  89 +#endif /* __ASSEMBLY__ */
  90 +
  91 +#endif /* _ASM_X86_SMAP_H */
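
The ASM_STAC/ASM_CLAC string forms are intended for open-coded user accesses
like the ones <asm/uaccess.h> gains below; both expand to alternatives, so they
cost a 3-byte NOP on CPUs without SMAP. A sketch of the pattern (the helper is
made up and assumes the usual kernel headers for _ASM_EXTABLE and EFAULT); note
that the fixup path jumps back to the label carrying the CLAC, so EFLAGS.AC is
cleared on both the success and the fault exit:

    static int poke_user_byte(unsigned char __user *addr, unsigned char val)
    {
            int err = 0;

            asm volatile(ASM_STAC "\n"
                         "1:        movb %2,%1\n"
                         "2:        " ASM_CLAC "\n"
                         ".section .fixup,\"ax\"\n"
                         "3:        movl %3,%0\n"
                         "        jmp 2b\n"
                         ".previous\n"
                         _ASM_EXTABLE(1b, 3b)
                         : "+r" (err), "=m" (*addr)
                         : "iq" (val), "i" (-EFAULT));

            return err;
    }
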
arch/x86/include/asm/uaccess.h
... ... @@ -9,6 +9,7 @@
9 9 #include <linux/string.h>
10 10 #include <asm/asm.h>
11 11 #include <asm/page.h>
  12 +#include <asm/smap.h>
12 13  
13 14 #define VERIFY_READ 0
14 15 #define VERIFY_WRITE 1
15 16  
... ... @@ -192,9 +193,10 @@
192 193  
193 194 #ifdef CONFIG_X86_32
194 195 #define __put_user_asm_u64(x, addr, err, errret) \
195   - asm volatile("1: movl %%eax,0(%2)\n" \
  196 + asm volatile(ASM_STAC "\n" \
  197 + "1: movl %%eax,0(%2)\n" \
196 198 "2: movl %%edx,4(%2)\n" \
197   - "3:\n" \
  199 + "3: " ASM_CLAC "\n" \
198 200 ".section .fixup,\"ax\"\n" \
199 201 "4: movl %3,%0\n" \
200 202 " jmp 3b\n" \
201 203  
... ... @@ -205,9 +207,10 @@
205 207 : "A" (x), "r" (addr), "i" (errret), "0" (err))
206 208  
207 209 #define __put_user_asm_ex_u64(x, addr) \
208   - asm volatile("1: movl %%eax,0(%1)\n" \
  210 + asm volatile(ASM_STAC "\n" \
  211 + "1: movl %%eax,0(%1)\n" \
209 212 "2: movl %%edx,4(%1)\n" \
210   - "3:\n" \
  213 + "3: " ASM_CLAC "\n" \
211 214 _ASM_EXTABLE_EX(1b, 2b) \
212 215 _ASM_EXTABLE_EX(2b, 3b) \
213 216 : : "A" (x), "r" (addr))
... ... @@ -379,8 +382,9 @@
379 382 } while (0)
380 383  
381 384 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
382   - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
383   - "2:\n" \
  385 + asm volatile(ASM_STAC "\n" \
  386 + "1: mov"itype" %2,%"rtype"1\n" \
  387 + "2: " ASM_CLAC "\n" \
384 388 ".section .fixup,\"ax\"\n" \
385 389 "3: mov %3,%0\n" \
386 390 " xor"itype" %"rtype"1,%"rtype"1\n" \
... ... @@ -443,8 +447,9 @@
443 447 * aliasing issues.
444 448 */
445 449 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
446   - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
447   - "2:\n" \
  450 + asm volatile(ASM_STAC "\n" \
  451 + "1: mov"itype" %"rtype"1,%2\n" \
  452 + "2: " ASM_CLAC "\n" \
448 453 ".section .fixup,\"ax\"\n" \
449 454 "3: mov %3,%0\n" \
450 455 " jmp 2b\n" \
451 456  
452 457  
453 458  
... ... @@ -463,13 +468,13 @@
463 468 * uaccess_try and catch
464 469 */
465 470 #define uaccess_try do { \
466   - int prev_err = current_thread_info()->uaccess_err; \
467 471 current_thread_info()->uaccess_err = 0; \
  472 + stac(); \
468 473 barrier();
469 474  
470 475 #define uaccess_catch(err) \
  476 + clac(); \
471 477 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
472   - current_thread_info()->uaccess_err = prev_err; \
473 478 } while (0)
474 479  
475 480 /**
... ... @@ -568,6 +573,9 @@
568 573  
569 574 extern __must_check long strlen_user(const char __user *str);
570 575 extern __must_check long strnlen_user(const char __user *str, long n);
  576 +
  577 +unsigned long __must_check clear_user(void __user *mem, unsigned long len);
  578 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
571 579  
572 580 /*
573 581 * movsl can be slow when source and dest are not both 8-byte aligned
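
With the change above, uaccess_try/uaccess_catch open and close the user-access
window themselves, so an entire *_try region runs with EFLAGS.AC set; that is
also why the signal-frame hunks below move copy_siginfo_to_user(),
setup_sigcontext() and the bulk __copy_to_user() calls out of those regions
(they do their own STAC/CLAC internally). A sketch of the pattern with a
made-up frame layout:

    struct demo_uframe {
            unsigned long field_a;
            unsigned long field_b;
    };

    static int read_two_fields(struct demo_uframe __user *u,
                               unsigned long *a, unsigned long *b)
    {
            int err = 0;

            get_user_try {
                    /* no per-access STAC/CLAC needed inside the region */
                    get_user_ex(*a, &u->field_a);
                    get_user_ex(*b, &u->field_b);
            } get_user_catch(err);

            return err;
    }
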
arch/x86/include/asm/uaccess_32.h
... ... @@ -213,8 +213,5 @@
213 213 return n;
214 214 }
215 215  
216   -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
217   -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
218   -
219 216 #endif /* _ASM_X86_UACCESS_32_H */
arch/x86/include/asm/uaccess_64.h
... ... @@ -217,9 +217,6 @@
217 217 }
218 218 }
219 219  
220   -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
221   -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
222   -
223 220 static __must_check __always_inline int
224 221 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
225 222 {
arch/x86/include/asm/xsave.h
... ... @@ -70,8 +70,9 @@
70 70 if (unlikely(err))
71 71 return -EFAULT;
72 72  
73   - __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
74   - "2:\n"
  73 + __asm__ __volatile__(ASM_STAC "\n"
  74 + "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
  75 + "2: " ASM_CLAC "\n"
75 76 ".section .fixup,\"ax\"\n"
76 77 "3: movl $-1,%[err]\n"
77 78 " jmp 2b\n"
... ... @@ -90,8 +91,9 @@
90 91 u32 lmask = mask;
91 92 u32 hmask = mask >> 32;
92 93  
93   - __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
94   - "2:\n"
  94 + __asm__ __volatile__(ASM_STAC "\n"
  95 + "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
  96 + "2: " ASM_CLAC "\n"
95 97 ".section .fixup,\"ax\"\n"
96 98 "3: movl $-1,%[err]\n"
97 99 " jmp 2b\n"
arch/x86/kernel/acpi/sleep.c
... ... @@ -43,17 +43,22 @@
43 43  
44 44 header->video_mode = saved_video_mode;
45 45  
  46 + header->pmode_behavior = 0;
  47 +
46 48 #ifndef CONFIG_64BIT
47 49 store_gdt((struct desc_ptr *)&header->pmode_gdt);
48 50  
49   - if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
50   - &header->pmode_efer_high))
51   - header->pmode_efer_low = header->pmode_efer_high = 0;
  51 + if (!rdmsr_safe(MSR_EFER,
  52 + &header->pmode_efer_low,
  53 + &header->pmode_efer_high))
  54 + header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
52 55 #endif /* !CONFIG_64BIT */
53 56  
54 57 header->pmode_cr0 = read_cr0();
55   - header->pmode_cr4 = read_cr4_safe();
56   - header->pmode_behavior = 0;
  58 + if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
  59 + header->pmode_cr4 = read_cr4();
  60 + header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
  61 + }
57 62 if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
58 63 &header->pmode_misc_en_low,
59 64 &header->pmode_misc_en_high))
arch/x86/kernel/cpu/common.c
... ... @@ -259,25 +259,38 @@
259 259 }
260 260 #endif
261 261  
262   -static int disable_smep __cpuinitdata;
263 262 static __init int setup_disable_smep(char *arg)
264 263 {
265   - disable_smep = 1;
  264 + setup_clear_cpu_cap(X86_FEATURE_SMEP);
266 265 return 1;
267 266 }
268 267 __setup("nosmep", setup_disable_smep);
269 268  
270   -static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
  269 +static __always_inline void setup_smep(struct cpuinfo_x86 *c)
271 270 {
272   - if (cpu_has(c, X86_FEATURE_SMEP)) {
273   - if (unlikely(disable_smep)) {
274   - setup_clear_cpu_cap(X86_FEATURE_SMEP);
275   - clear_in_cr4(X86_CR4_SMEP);
276   - } else
277   - set_in_cr4(X86_CR4_SMEP);
278   - }
  271 + if (cpu_has(c, X86_FEATURE_SMEP))
  272 + set_in_cr4(X86_CR4_SMEP);
279 273 }
280 274  
  275 +static __init int setup_disable_smap(char *arg)
  276 +{
  277 + setup_clear_cpu_cap(X86_FEATURE_SMAP);
  278 + return 1;
  279 +}
  280 +__setup("nosmap", setup_disable_smap);
  281 +
  282 +static __always_inline void setup_smap(struct cpuinfo_x86 *c)
  283 +{
  284 + unsigned long eflags;
  285 +
  286 + /* This should have been cleared long ago */
  287 + raw_local_save_flags(eflags);
  288 + BUG_ON(eflags & X86_EFLAGS_AC);
  289 +
  290 + if (cpu_has(c, X86_FEATURE_SMAP))
  291 + set_in_cr4(X86_CR4_SMAP);
  292 +}
  293 +
281 294 /*
282 295 * Some CPU features depend on higher CPUID levels, which may not always
283 296 * be available due to CPUID level capping or broken virtualization
... ... @@ -712,8 +725,6 @@
712 725 c->cpu_index = 0;
713 726 filter_cpuid_features(c, false);
714 727  
715   - setup_smep(c);
716   -
717 728 if (this_cpu->c_bsp_init)
718 729 this_cpu->c_bsp_init(c);
719 730 }
... ... @@ -798,8 +809,6 @@
798 809 c->phys_proc_id = c->initial_apicid;
799 810 }
800 811  
801   - setup_smep(c);
802   -
803 812 get_model_name(c); /* Default name */
804 813  
805 814 detect_nopl(c);
... ... @@ -864,6 +873,10 @@
864 873 /* Disable the PN if appropriate */
865 874 squash_the_stupid_serial_number(c);
866 875  
  876 + /* Set up SMEP/SMAP */
  877 + setup_smep(c);
  878 + setup_smap(c);
  879 +
867 880 /*
868 881 * The vendor-specific functions might have changed features.
869 882 * Now we do "generic changes."
... ... @@ -1114,7 +1127,8 @@
1114 1127  
1115 1128 /* Flags to clear on syscall */
1116 1129 wrmsrl(MSR_SYSCALL_MASK,
1117   - X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
  1130 + X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
  1131 + X86_EFLAGS_IOPL|X86_EFLAGS_AC);
1118 1132 }
1119 1133  
1120 1134 /*
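
After the reordering above, each CPU turns on CR4.SMEP/CR4.SMAP from the common
identify path unless "nosmep"/"nosmap" cleared the feature bits first, and the
SYSCALL flag mask now drops EFLAGS.AC on kernel entry. A throwaway sketch (the
helper name is made up; read_cr4() is the accessor this tree uses, as seen in
the acpi/sleep.c hunk above) of the state that leaves behind:

    static inline bool smap_active(void)
    {
            return boot_cpu_has(X86_FEATURE_SMAP) &&
                   (read_cr4() & X86_CR4_SMAP);
    }
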
arch/x86/kernel/entry_32.S
... ... @@ -57,6 +57,7 @@
57 57 #include <asm/cpufeature.h>
58 58 #include <asm/alternative-asm.h>
59 59 #include <asm/asm.h>
  60 +#include <asm/smap.h>
60 61  
61 62 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
62 63 #include <linux/elf-em.h>
63 64  
... ... @@ -407,7 +408,9 @@
407 408 */
408 409 cmpl $__PAGE_OFFSET-3,%ebp
409 410 jae syscall_fault
  411 + ASM_STAC
410 412 1: movl (%ebp),%ebp
  413 + ASM_CLAC
411 414 movl %ebp,PT_EBP(%esp)
412 415 _ASM_EXTABLE(1b,syscall_fault)
413 416  
... ... @@ -488,6 +491,7 @@
488 491 # system call handler stub
489 492 ENTRY(system_call)
490 493 RING0_INT_FRAME # can't unwind into user space anyway
  494 + ASM_CLAC
491 495 pushl_cfi %eax # save orig_eax
492 496 SAVE_ALL
493 497 GET_THREAD_INFO(%ebp)
... ... @@ -670,6 +674,7 @@
670 674  
671 675 RING0_INT_FRAME # can't unwind into user space anyway
672 676 syscall_fault:
  677 + ASM_CLAC
673 678 GET_THREAD_INFO(%ebp)
674 679 movl $-EFAULT,PT_EAX(%esp)
675 680 jmp resume_userspace
... ... @@ -825,6 +830,7 @@
825 830 */
826 831 .p2align CONFIG_X86_L1_CACHE_SHIFT
827 832 common_interrupt:
  833 + ASM_CLAC
828 834 addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */
829 835 SAVE_ALL
830 836 TRACE_IRQS_OFF
... ... @@ -841,6 +847,7 @@
841 847 #define BUILD_INTERRUPT3(name, nr, fn) \
842 848 ENTRY(name) \
843 849 RING0_INT_FRAME; \
  850 + ASM_CLAC; \
844 851 pushl_cfi $~(nr); \
845 852 SAVE_ALL; \
846 853 TRACE_IRQS_OFF \
... ... @@ -857,6 +864,7 @@
857 864  
858 865 ENTRY(coprocessor_error)
859 866 RING0_INT_FRAME
  867 + ASM_CLAC
860 868 pushl_cfi $0
861 869 pushl_cfi $do_coprocessor_error
862 870 jmp error_code
... ... @@ -865,6 +873,7 @@
865 873  
866 874 ENTRY(simd_coprocessor_error)
867 875 RING0_INT_FRAME
  876 + ASM_CLAC
868 877 pushl_cfi $0
869 878 #ifdef CONFIG_X86_INVD_BUG
870 879 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
... ... @@ -886,6 +895,7 @@
886 895  
887 896 ENTRY(device_not_available)
888 897 RING0_INT_FRAME
  898 + ASM_CLAC
889 899 pushl_cfi $-1 # mark this as an int
890 900 pushl_cfi $do_device_not_available
891 901 jmp error_code
... ... @@ -906,6 +916,7 @@
906 916  
907 917 ENTRY(overflow)
908 918 RING0_INT_FRAME
  919 + ASM_CLAC
909 920 pushl_cfi $0
910 921 pushl_cfi $do_overflow
911 922 jmp error_code
... ... @@ -914,6 +925,7 @@
914 925  
915 926 ENTRY(bounds)
916 927 RING0_INT_FRAME
  928 + ASM_CLAC
917 929 pushl_cfi $0
918 930 pushl_cfi $do_bounds
919 931 jmp error_code
... ... @@ -922,6 +934,7 @@
922 934  
923 935 ENTRY(invalid_op)
924 936 RING0_INT_FRAME
  937 + ASM_CLAC
925 938 pushl_cfi $0
926 939 pushl_cfi $do_invalid_op
927 940 jmp error_code
... ... @@ -930,6 +943,7 @@
930 943  
931 944 ENTRY(coprocessor_segment_overrun)
932 945 RING0_INT_FRAME
  946 + ASM_CLAC
933 947 pushl_cfi $0
934 948 pushl_cfi $do_coprocessor_segment_overrun
935 949 jmp error_code
... ... @@ -938,6 +952,7 @@
938 952  
939 953 ENTRY(invalid_TSS)
940 954 RING0_EC_FRAME
  955 + ASM_CLAC
941 956 pushl_cfi $do_invalid_TSS
942 957 jmp error_code
943 958 CFI_ENDPROC
... ... @@ -945,6 +960,7 @@
945 960  
946 961 ENTRY(segment_not_present)
947 962 RING0_EC_FRAME
  963 + ASM_CLAC
948 964 pushl_cfi $do_segment_not_present
949 965 jmp error_code
950 966 CFI_ENDPROC
... ... @@ -952,6 +968,7 @@
952 968  
953 969 ENTRY(stack_segment)
954 970 RING0_EC_FRAME
  971 + ASM_CLAC
955 972 pushl_cfi $do_stack_segment
956 973 jmp error_code
957 974 CFI_ENDPROC
... ... @@ -959,6 +976,7 @@
959 976  
960 977 ENTRY(alignment_check)
961 978 RING0_EC_FRAME
  979 + ASM_CLAC
962 980 pushl_cfi $do_alignment_check
963 981 jmp error_code
964 982 CFI_ENDPROC
... ... @@ -966,6 +984,7 @@
966 984  
967 985 ENTRY(divide_error)
968 986 RING0_INT_FRAME
  987 + ASM_CLAC
969 988 pushl_cfi $0 # no error code
970 989 pushl_cfi $do_divide_error
971 990 jmp error_code
... ... @@ -975,6 +994,7 @@
975 994 #ifdef CONFIG_X86_MCE
976 995 ENTRY(machine_check)
977 996 RING0_INT_FRAME
  997 + ASM_CLAC
978 998 pushl_cfi $0
979 999 pushl_cfi machine_check_vector
980 1000 jmp error_code
... ... @@ -984,6 +1004,7 @@
984 1004  
985 1005 ENTRY(spurious_interrupt_bug)
986 1006 RING0_INT_FRAME
  1007 + ASM_CLAC
987 1008 pushl_cfi $0
988 1009 pushl_cfi $do_spurious_interrupt_bug
989 1010 jmp error_code
... ... @@ -1273,6 +1294,7 @@
1273 1294  
1274 1295 ENTRY(page_fault)
1275 1296 RING0_EC_FRAME
  1297 + ASM_CLAC
1276 1298 pushl_cfi $do_page_fault
1277 1299 ALIGN
1278 1300 error_code:
... ... @@ -1345,6 +1367,7 @@
1345 1367  
1346 1368 ENTRY(debug)
1347 1369 RING0_INT_FRAME
  1370 + ASM_CLAC
1348 1371 cmpl $ia32_sysenter_target,(%esp)
1349 1372 jne debug_stack_correct
1350 1373 FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
... ... @@ -1369,6 +1392,7 @@
1369 1392 */
1370 1393 ENTRY(nmi)
1371 1394 RING0_INT_FRAME
  1395 + ASM_CLAC
1372 1396 pushl_cfi %eax
1373 1397 movl %ss, %eax
1374 1398 cmpw $__ESPFIX_SS, %ax
... ... @@ -1439,6 +1463,7 @@
1439 1463  
1440 1464 ENTRY(int3)
1441 1465 RING0_INT_FRAME
  1466 + ASM_CLAC
1442 1467 pushl_cfi $-1 # mark this as an int
1443 1468 SAVE_ALL
1444 1469 TRACE_IRQS_OFF
... ... @@ -1459,6 +1484,7 @@
1459 1484 #ifdef CONFIG_KVM_GUEST
1460 1485 ENTRY(async_page_fault)
1461 1486 RING0_EC_FRAME
  1487 + ASM_CLAC
1462 1488 pushl_cfi $do_async_page_fault
1463 1489 jmp error_code
1464 1490 CFI_ENDPROC
arch/x86/kernel/entry_64.S
... ... @@ -57,6 +57,7 @@
57 57 #include <asm/percpu.h>
58 58 #include <asm/asm.h>
59 59 #include <asm/rcu.h>
  60 +#include <asm/smap.h>
60 61 #include <linux/err.h>
61 62  
62 63 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
... ... @@ -568,7 +569,8 @@
568 569 * System call entry. Up to 6 arguments in registers are supported.
569 570 *
570 571 * SYSCALL does not save anything on the stack and does not change the
571   - * stack pointer.
  572 + * stack pointer. However, it does mask the flags register for us, so
  573 + * CLD and CLAC are not needed.
572 574 */
573 575  
574 576 /*
... ... @@ -987,6 +989,7 @@
987 989 */
988 990 .p2align CONFIG_X86_L1_CACHE_SHIFT
989 991 common_interrupt:
  992 + ASM_CLAC
990 993 XCPT_FRAME
991 994 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
992 995 interrupt do_IRQ
... ... @@ -1126,6 +1129,7 @@
1126 1129 */
1127 1130 .macro apicinterrupt num sym do_sym
1128 1131 ENTRY(\sym)
  1132 + ASM_CLAC
1129 1133 INTR_FRAME
1130 1134 pushq_cfi $~(\num)
1131 1135 .Lcommon_\sym:
... ... @@ -1180,6 +1184,7 @@
1180 1184 */
1181 1185 .macro zeroentry sym do_sym
1182 1186 ENTRY(\sym)
  1187 + ASM_CLAC
1183 1188 INTR_FRAME
1184 1189 PARAVIRT_ADJUST_EXCEPTION_FRAME
1185 1190 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
... ... @@ -1197,6 +1202,7 @@
1197 1202  
1198 1203 .macro paranoidzeroentry sym do_sym
1199 1204 ENTRY(\sym)
  1205 + ASM_CLAC
1200 1206 INTR_FRAME
1201 1207 PARAVIRT_ADJUST_EXCEPTION_FRAME
1202 1208 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
... ... @@ -1215,6 +1221,7 @@
1215 1221 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
1216 1222 .macro paranoidzeroentry_ist sym do_sym ist
1217 1223 ENTRY(\sym)
  1224 + ASM_CLAC
1218 1225 INTR_FRAME
1219 1226 PARAVIRT_ADJUST_EXCEPTION_FRAME
1220 1227 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
... ... @@ -1234,6 +1241,7 @@
1234 1241  
1235 1242 .macro errorentry sym do_sym
1236 1243 ENTRY(\sym)
  1244 + ASM_CLAC
1237 1245 XCPT_FRAME
1238 1246 PARAVIRT_ADJUST_EXCEPTION_FRAME
1239 1247 subq $ORIG_RAX-R15, %rsp
... ... @@ -1252,6 +1260,7 @@
1252 1260 /* error code is on the stack already */
1253 1261 .macro paranoiderrorentry sym do_sym
1254 1262 ENTRY(\sym)
  1263 + ASM_CLAC
1255 1264 XCPT_FRAME
1256 1265 PARAVIRT_ADJUST_EXCEPTION_FRAME
1257 1266 subq $ORIG_RAX-R15, %rsp
arch/x86/kernel/head_32.S
... ... @@ -287,27 +287,28 @@
287 287 leal -__PAGE_OFFSET(%ecx),%esp
288 288  
289 289 default_entry:
290   -
291 290 /*
292 291 * New page tables may be in 4Mbyte page mode and may
293 292 * be using the global pages.
294 293 *
295 294 * NOTE! If we are on a 486 we may have no cr4 at all!
296   - * So we do not try to touch it unless we really have
297   - * some bits in it to set. This won't work if the BSP
298   - * implements cr4 but this AP does not -- very unlikely
299   - * but be warned! The same applies to the pse feature
300   - * if not equally supported. --macro
301   - *
302   - * NOTE! We have to correct for the fact that we're
303   - * not yet offset PAGE_OFFSET..
  295 + * Specifically, cr4 exists if and only if CPUID exists,
  296 + * which in turn exists if and only if EFLAGS.ID exists.
304 297 */
305   -#define cr4_bits pa(mmu_cr4_features)
306   - movl cr4_bits,%edx
307   - andl %edx,%edx
308   - jz 6f
309   - movl %cr4,%eax # Turn on paging options (PSE,PAE,..)
310   - orl %edx,%eax
  298 + movl $X86_EFLAGS_ID,%ecx
  299 + pushl %ecx
  300 + popfl
  301 + pushfl
  302 + popl %eax
  303 + pushl $0
  304 + popfl
  305 + pushfl
  306 + popl %edx
  307 + xorl %edx,%eax
  308 + testl %ecx,%eax
  309 + jz 6f # No ID flag = no CPUID = no CR4
  310 +
  311 + movl pa(mmu_cr4_features),%eax
311 312 movl %eax,%cr4
312 313  
313 314 testb $X86_CR4_PAE, %al # check if PAE is enabled
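
The new default_entry probes for CPUID by testing whether EFLAGS.ID can be
toggled and only touches %cr4 when it can; the realmode wakeup code at the end
of this series plays the same trick. The same test rendered in C (a sketch
assuming a 32-bit build):

    /* returns nonzero iff EFLAGS.ID is toggleable, i.e. CPUID (and
     * therefore CR4) exists */
    static int have_cpuid(void)
    {
            unsigned long f1, f2;

            asm volatile("pushfl\n\t"               /* save original flags  */
                         "pushfl\n\t"
                         "popl %0\n\t"              /* f1 = current EFLAGS  */
                         "movl %0, %1\n\t"
                         "xorl %2, %1\n\t"          /* flip the ID bit      */
                         "pushl %1\n\t"
                         "popfl\n\t"                /* try to install it    */
                         "pushfl\n\t"
                         "popl %1\n\t"              /* f2 = what stuck      */
                         "popfl"                    /* restore saved flags  */
                         : "=&r" (f1), "=&r" (f2)
                         : "ir" (X86_EFLAGS_ID));

            return !!((f1 ^ f2) & X86_EFLAGS_ID);
    }
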
arch/x86/kernel/signal.c
... ... @@ -114,11 +114,12 @@
114 114 regs->orig_ax = -1; /* disable syscall checks */
115 115  
116 116 get_user_ex(buf, &sc->fpstate);
117   - err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
118 117  
119 118 get_user_ex(*pax, &sc->ax);
120 119 } get_user_catch(err);
121 120  
  121 + err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
  122 +
122 123 return err;
123 124 }
124 125  
... ... @@ -355,7 +356,6 @@
355 356 put_user_ex(sig, &frame->sig);
356 357 put_user_ex(&frame->info, &frame->pinfo);
357 358 put_user_ex(&frame->uc, &frame->puc);
358   - err |= copy_siginfo_to_user(&frame->info, info);
359 359  
360 360 /* Create the ucontext. */
361 361 if (cpu_has_xsave)
... ... @@ -367,9 +367,6 @@
367 367 put_user_ex(sas_ss_flags(regs->sp),
368 368 &frame->uc.uc_stack.ss_flags);
369 369 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
370   - err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
371   - regs, set->sig[0]);
372   - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
373 370  
374 371 /* Set up to return from userspace. */
375 372 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
... ... @@ -386,6 +383,11 @@
386 383 */
387 384 put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
388 385 } put_user_catch(err);
  386 +
  387 + err |= copy_siginfo_to_user(&frame->info, info);
  388 + err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
  389 + regs, set->sig[0]);
  390 + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
389 391  
390 392 if (err)
391 393 return -EFAULT;
... ... @@ -434,8 +436,6 @@
434 436 put_user_ex(sas_ss_flags(regs->sp),
435 437 &frame->uc.uc_stack.ss_flags);
436 438 put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
437   - err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
438   - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
439 439  
440 440 /* Set up to return from userspace. If provided, use a stub
441 441 already in userspace. */
... ... @@ -448,6 +448,9 @@
448 448 }
449 449 } put_user_catch(err);
450 450  
  451 + err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
  452 + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
  453 +
451 454 if (err)
452 455 return -EFAULT;
453 456  
... ... @@ -504,9 +507,6 @@
504 507 &frame->uc.uc_stack.ss_flags);
505 508 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
506 509 put_user_ex(0, &frame->uc.uc__pad0);
507   - err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
508   - regs, set->sig[0]);
509   - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
510 510  
511 511 if (ka->sa.sa_flags & SA_RESTORER) {
512 512 restorer = ka->sa.sa_restorer;
... ... @@ -517,6 +517,10 @@
517 517 }
518 518 put_user_ex(restorer, &frame->pretcode);
519 519 } put_user_catch(err);
  520 +
  521 + err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
  522 + regs, set->sig[0]);
  523 + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
520 524  
521 525 if (err)
522 526 return -EFAULT;
arch/x86/kernel/xsave.c
... ... @@ -315,7 +315,7 @@
315 315 if ((unsigned long)buf % 64 || fx_only) {
316 316 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
317 317 xrstor_state(init_xstate_buf, init_bv);
318   - return fxrstor_checking((__force void *) buf);
  318 + return fxrstor_user(buf);
319 319 } else {
320 320 u64 init_bv = pcntxt_mask & ~xbv;
321 321 if (unlikely(init_bv))
322 322  
... ... @@ -323,9 +323,9 @@
323 323 return xrestore_user(buf, xbv);
324 324 }
325 325 } else if (use_fxsr()) {
326   - return fxrstor_checking((__force void *) buf);
  326 + return fxrstor_user(buf);
327 327 } else
328   - return frstor_checking((__force void *) buf);
  328 + return frstor_user(buf);
329 329 }
330 330  
331 331 int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
arch/x86/lib/copy_user_64.S
... ... @@ -17,6 +17,7 @@
17 17 #include <asm/cpufeature.h>
18 18 #include <asm/alternative-asm.h>
19 19 #include <asm/asm.h>
  20 +#include <asm/smap.h>
20 21  
21 22 /*
22 23 * By placing feature2 after feature1 in altinstructions section, we logically
... ... @@ -130,6 +131,7 @@
130 131 */
131 132 ENTRY(copy_user_generic_unrolled)
132 133 CFI_STARTPROC
  134 + ASM_STAC
133 135 cmpl $8,%edx
134 136 jb 20f /* less then 8 bytes, go to byte copy loop */
135 137 ALIGN_DESTINATION
... ... @@ -177,6 +179,7 @@
177 179 decl %ecx
178 180 jnz 21b
179 181 23: xor %eax,%eax
  182 + ASM_CLAC
180 183 ret
181 184  
182 185 .section .fixup,"ax"
... ... @@ -232,6 +235,7 @@
232 235 */
233 236 ENTRY(copy_user_generic_string)
234 237 CFI_STARTPROC
  238 + ASM_STAC
235 239 andl %edx,%edx
236 240 jz 4f
237 241 cmpl $8,%edx
... ... @@ -246,6 +250,7 @@
246 250 3: rep
247 251 movsb
248 252 4: xorl %eax,%eax
  253 + ASM_CLAC
249 254 ret
250 255  
251 256 .section .fixup,"ax"
252 257  
... ... @@ -273,12 +278,14 @@
273 278 */
274 279 ENTRY(copy_user_enhanced_fast_string)
275 280 CFI_STARTPROC
  281 + ASM_STAC
276 282 andl %edx,%edx
277 283 jz 2f
278 284 movl %edx,%ecx
279 285 1: rep
280 286 movsb
281 287 2: xorl %eax,%eax
  288 + ASM_CLAC
282 289 ret
283 290  
284 291 .section .fixup,"ax"
arch/x86/lib/copy_user_nocache_64.S
... ... @@ -15,6 +15,7 @@
15 15 #include <asm/asm-offsets.h>
16 16 #include <asm/thread_info.h>
17 17 #include <asm/asm.h>
  18 +#include <asm/smap.h>
18 19  
19 20 .macro ALIGN_DESTINATION
20 21 #ifdef FIX_ALIGNMENT
... ... @@ -48,6 +49,7 @@
48 49 */
49 50 ENTRY(__copy_user_nocache)
50 51 CFI_STARTPROC
  52 + ASM_STAC
51 53 cmpl $8,%edx
52 54 jb 20f /* less then 8 bytes, go to byte copy loop */
53 55 ALIGN_DESTINATION
... ... @@ -95,6 +97,7 @@
95 97 decl %ecx
96 98 jnz 21b
97 99 23: xorl %eax,%eax
  100 + ASM_CLAC
98 101 sfence
99 102 ret
100 103  
arch/x86/lib/getuser.S
... ... @@ -33,6 +33,7 @@
33 33 #include <asm/asm-offsets.h>
34 34 #include <asm/thread_info.h>
35 35 #include <asm/asm.h>
  36 +#include <asm/smap.h>
36 37  
37 38 .text
38 39 ENTRY(__get_user_1)
39 40  
... ... @@ -40,8 +41,10 @@
40 41 GET_THREAD_INFO(%_ASM_DX)
41 42 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
42 43 jae bad_get_user
  44 + ASM_STAC
43 45 1: movzb (%_ASM_AX),%edx
44 46 xor %eax,%eax
  47 + ASM_CLAC
45 48 ret
46 49 CFI_ENDPROC
47 50 ENDPROC(__get_user_1)
48 51  
... ... @@ -53,8 +56,10 @@
53 56 GET_THREAD_INFO(%_ASM_DX)
54 57 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
55 58 jae bad_get_user
  59 + ASM_STAC
56 60 2: movzwl -1(%_ASM_AX),%edx
57 61 xor %eax,%eax
  62 + ASM_CLAC
58 63 ret
59 64 CFI_ENDPROC
60 65 ENDPROC(__get_user_2)
61 66  
... ... @@ -66,8 +71,10 @@
66 71 GET_THREAD_INFO(%_ASM_DX)
67 72 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
68 73 jae bad_get_user
  74 + ASM_STAC
69 75 3: mov -3(%_ASM_AX),%edx
70 76 xor %eax,%eax
  77 + ASM_CLAC
71 78 ret
72 79 CFI_ENDPROC
73 80 ENDPROC(__get_user_4)
74 81  
... ... @@ -80,8 +87,10 @@
80 87 GET_THREAD_INFO(%_ASM_DX)
81 88 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
82 89 jae bad_get_user
  90 + ASM_STAC
83 91 4: movq -7(%_ASM_AX),%_ASM_DX
84 92 xor %eax,%eax
  93 + ASM_CLAC
85 94 ret
86 95 CFI_ENDPROC
87 96 ENDPROC(__get_user_8)
... ... @@ -91,6 +100,7 @@
91 100 CFI_STARTPROC
92 101 xor %edx,%edx
93 102 mov $(-EFAULT),%_ASM_AX
  103 + ASM_CLAC
94 104 ret
95 105 CFI_ENDPROC
96 106 END(bad_get_user)
arch/x86/lib/putuser.S
... ... @@ -15,6 +15,7 @@
15 15 #include <asm/thread_info.h>
16 16 #include <asm/errno.h>
17 17 #include <asm/asm.h>
  18 +#include <asm/smap.h>
18 19  
19 20  
20 21 /*
... ... @@ -31,7 +32,8 @@
31 32  
32 33 #define ENTER CFI_STARTPROC ; \
33 34 GET_THREAD_INFO(%_ASM_BX)
34   -#define EXIT ret ; \
  35 +#define EXIT ASM_CLAC ; \
  36 + ret ; \
35 37 CFI_ENDPROC
36 38  
37 39 .text
... ... @@ -39,6 +41,7 @@
39 41 ENTER
40 42 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
41 43 jae bad_put_user
  44 + ASM_STAC
42 45 1: movb %al,(%_ASM_CX)
43 46 xor %eax,%eax
44 47 EXIT
... ... @@ -50,6 +53,7 @@
50 53 sub $1,%_ASM_BX
51 54 cmp %_ASM_BX,%_ASM_CX
52 55 jae bad_put_user
  56 + ASM_STAC
53 57 2: movw %ax,(%_ASM_CX)
54 58 xor %eax,%eax
55 59 EXIT
... ... @@ -61,6 +65,7 @@
61 65 sub $3,%_ASM_BX
62 66 cmp %_ASM_BX,%_ASM_CX
63 67 jae bad_put_user
  68 + ASM_STAC
64 69 3: movl %eax,(%_ASM_CX)
65 70 xor %eax,%eax
66 71 EXIT
... ... @@ -72,6 +77,7 @@
72 77 sub $7,%_ASM_BX
73 78 cmp %_ASM_BX,%_ASM_CX
74 79 jae bad_put_user
  80 + ASM_STAC
75 81 4: mov %_ASM_AX,(%_ASM_CX)
76 82 #ifdef CONFIG_X86_32
77 83 5: movl %edx,4(%_ASM_CX)
arch/x86/lib/usercopy_32.c
... ... @@ -42,10 +42,11 @@
42 42 int __d0; \
43 43 might_fault(); \
44 44 __asm__ __volatile__( \
  45 + ASM_STAC "\n" \
45 46 "0: rep; stosl\n" \
46 47 " movl %2,%0\n" \
47 48 "1: rep; stosb\n" \
48   - "2:\n" \
  49 + "2: " ASM_CLAC "\n" \
49 50 ".section .fixup,\"ax\"\n" \
50 51 "3: lea 0(%2,%0,4),%0\n" \
51 52 " jmp 2b\n" \
52 53  
... ... @@ -626,10 +627,12 @@
626 627 return n;
627 628 }
628 629 #endif
  630 + stac();
629 631 if (movsl_is_ok(to, from, n))
630 632 __copy_user(to, from, n);
631 633 else
632 634 n = __copy_user_intel(to, from, n);
  635 + clac();
633 636 return n;
634 637 }
635 638 EXPORT_SYMBOL(__copy_to_user_ll);
636 639  
... ... @@ -637,10 +640,12 @@
637 640 unsigned long __copy_from_user_ll(void *to, const void __user *from,
638 641 unsigned long n)
639 642 {
  643 + stac();
640 644 if (movsl_is_ok(to, from, n))
641 645 __copy_user_zeroing(to, from, n);
642 646 else
643 647 n = __copy_user_zeroing_intel(to, from, n);
  648 + clac();
644 649 return n;
645 650 }
646 651 EXPORT_SYMBOL(__copy_from_user_ll);
647 652  
... ... @@ -648,11 +653,13 @@
648 653 unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
649 654 unsigned long n)
650 655 {
  656 + stac();
651 657 if (movsl_is_ok(to, from, n))
652 658 __copy_user(to, from, n);
653 659 else
654 660 n = __copy_user_intel((void __user *)to,
655 661 (const void *)from, n);
  662 + clac();
656 663 return n;
657 664 }
658 665 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
... ... @@ -660,6 +667,7 @@
660 667 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
661 668 unsigned long n)
662 669 {
  670 + stac();
663 671 #ifdef CONFIG_X86_INTEL_USERCOPY
664 672 if (n > 64 && cpu_has_xmm2)
665 673 n = __copy_user_zeroing_intel_nocache(to, from, n);
... ... @@ -668,6 +676,7 @@
668 676 #else
669 677 __copy_user_zeroing(to, from, n);
670 678 #endif
  679 + clac();
671 680 return n;
672 681 }
673 682 EXPORT_SYMBOL(__copy_from_user_ll_nocache);
... ... @@ -675,6 +684,7 @@
675 684 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
676 685 unsigned long n)
677 686 {
  687 + stac();
678 688 #ifdef CONFIG_X86_INTEL_USERCOPY
679 689 if (n > 64 && cpu_has_xmm2)
680 690 n = __copy_user_intel_nocache(to, from, n);
... ... @@ -683,6 +693,7 @@
683 693 #else
684 694 __copy_user(to, from, n);
685 695 #endif
  696 + clac();
686 697 return n;
687 698 }
688 699 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
arch/x86/lib/usercopy_64.c
... ... @@ -18,6 +18,7 @@
18 18 might_fault();
19 19 /* no memory constraint because it doesn't change any memory gcc knows
20 20 about */
  21 + stac();
21 22 asm volatile(
22 23 " testq %[size8],%[size8]\n"
23 24 " jz 4f\n"
... ... @@ -40,6 +41,7 @@
40 41 : [size8] "=&c"(size), [dst] "=&D" (__d0)
41 42 : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
42 43 [zero] "r" (0UL), [eight] "r" (8UL));
  44 + clac();
43 45 return size;
44 46 }
45 47 EXPORT_SYMBOL(__clear_user);
... ... @@ -82,6 +84,7 @@
82 84 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
83 85 if (__put_user_nocheck(c, to++, sizeof(char)))
84 86 break;
  87 + clac();
85 88 return len;
86 89 }
arch/x86/mm/fault.c
... ... @@ -996,6 +996,17 @@
996 996 return address >= TASK_SIZE_MAX;
997 997 }
998 998  
  999 +static inline bool smap_violation(int error_code, struct pt_regs *regs)
  1000 +{
  1001 + if (error_code & PF_USER)
  1002 + return false;
  1003 +
  1004 + if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
  1005 + return false;
  1006 +
  1007 + return true;
  1008 +}
  1009 +
999 1010 /*
1000 1011 * This routine handles page faults. It determines the address,
1001 1012 * and the problem, and then passes it off to one of the appropriate
... ... @@ -1088,6 +1099,13 @@
1088 1099  
1089 1100 if (unlikely(error_code & PF_RSVD))
1090 1101 pgtable_bad(regs, error_code, address);
  1102 +
  1103 + if (static_cpu_has(X86_FEATURE_SMAP)) {
  1104 + if (unlikely(smap_violation(error_code, regs))) {
  1105 + bad_area_nosemaphore(regs, error_code, address);
  1106 + return;
  1107 + }
  1108 + }
1091 1109  
1092 1110 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
1093 1111  
arch/x86/mm/init_32.c
... ... @@ -709,7 +709,7 @@
709 709 "Checking if this processor honours the WP bit even in supervisor mode...");
710 710  
711 711 /* Any page-aligned address will do, the test is non-destructive */
712   - __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
  712 + __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_KERNEL_RO);
713 713 boot_cpu_data.wp_works_ok = do_test_wp_bit();
714 714 clear_fixmap(FIX_WP_TEST);
715 715  
arch/x86/realmode/rm/wakeup.h
... ... @@ -36,6 +36,8 @@
36 36  
37 37 /* Wakeup behavior bits */
38 38 #define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0
  39 +#define WAKEUP_BEHAVIOR_RESTORE_CR4 1
  40 +#define WAKEUP_BEHAVIOR_RESTORE_EFER 2
39 41  
40 42 #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
arch/x86/realmode/rm/wakeup_asm.S
... ... @@ -74,9 +74,18 @@
74 74  
75 75 lidtl wakeup_idt
76 76  
77   - /* Clear the EFLAGS */
78   - pushl $0
  77 + /* Clear the EFLAGS but remember if we have EFLAGS.ID */
  78 + movl $X86_EFLAGS_ID, %ecx
  79 + pushl %ecx
79 80 popfl
  81 + pushfl
  82 + popl %edi
  83 + pushl $0
  84 + popfl
  85 + pushfl
  86 + popl %edx
  87 + xorl %edx, %edi
  88 + andl %ecx, %edi /* %edi is zero iff CPUID & %cr4 are missing */
80 89  
81 90 /* Check header signature... */
82 91 movl signature, %eax
... ... @@ -93,8 +102,8 @@
93 102  
94 103 /* Restore MISC_ENABLE before entering protected mode, in case
95 104 BIOS decided to clear XD_DISABLE during S3. */
96   - movl pmode_behavior, %eax
97   - btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
  105 + movl pmode_behavior, %edi
  106 + btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %edi
98 107 jnc 1f
99 108  
100 109 movl pmode_misc_en, %eax
101 110  
102 111  
... ... @@ -110,15 +119,15 @@
110 119 movl pmode_cr3, %eax
111 120 movl %eax, %cr3
112 121  
113   - movl pmode_cr4, %ecx
114   - jecxz 1f
115   - movl %ecx, %cr4
  122 + btl $WAKEUP_BEHAVIOR_RESTORE_CR4, %edi
  123 + jz 1f
  124 + movl pmode_cr4, %eax
  125 + movl %eax, %cr4
116 126 1:
  127 + btl $WAKEUP_BEHAVIOR_RESTORE_EFER, %edi
  128 + jz 1f
117 129 movl pmode_efer, %eax
118 130 movl pmode_efer + 4, %edx
119   - movl %eax, %ecx
120   - orl %edx, %ecx
121   - jz 1f
122 131 movl $MSR_EFER, %ecx
123 132 wrmsr
124 133 1: