Commit 63bcff2a307b9bcc712a8251eb27df8b2e117967

Authored by H. Peter Anvin
1 parent a052858fab

x86, smap: Add STAC and CLAC instructions to control user space access

When Supervisor Mode Access Prevention (SMAP) is enabled, access to
userspace from the kernel is controlled by the AC flag.  To make the
performance of manipulating that flag acceptable, there are two new
instructions, STAC and CLAC, to set and clear it.
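
A minimal sketch of what the two instructions look like at the encoding level, assuming the <asm/smap.h> macros introduced earlier in this series (they are not part of this diff); contemporary assemblers may not know the mnemonics yet, so the instructions are assumed to be emitted as raw opcode bytes:

    /*
     * Illustration only -- assumed to mirror the __ASM_CLAC/__ASM_STAC
     * definitions from the earlier <asm/smap.h> patch in this series.
     */
    #define __ASM_CLAC ".byte 0x0f,0x01,0xca"  /* CLAC: clear EFLAGS.AC */
    #define __ASM_STAC ".byte 0x0f,0x01,0xcb"  /* STAC: set EFLAGS.AC   */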

This patch adds those instructions, via alternative(), when the SMAP
feature is enabled.  It also adds X86_EFLAGS_AC unconditionally to the
SYSCALL entry mask; there is simply no reason to make that one
conditional.
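
As a concrete illustration of the C-side pattern, here is a sketch modelled on the change this diff makes to __copy_to_user_ll() in arch/x86/lib/usercopy_32.c: the user-space access is bracketed by stac()/clac(), which alternative() turns into STAC/CLAC on SMAP-capable CPUs and into 3-byte NOPs everywhere else.

    /*
     * Sketch of the bracketing pattern; modelled on __copy_to_user_ll()
     * as modified below, where movsl_is_ok(), __copy_user() and
     * __copy_user_intel() are defined.
     */
    unsigned long __copy_to_user_ll(void __user *to, const void *from,
                                    unsigned long n)
    {
            stac();         /* EFLAGS.AC = 1: user access permitted */
            if (movsl_is_ok(to, from, n))
                    __copy_user(to, from, n);           /* string-move fast path */
            else
                    n = __copy_user_intel(to, from, n); /* unrolled fallback */
            clac();         /* EFLAGS.AC = 0: user access blocked again (with SMAP) */
            return n;       /* bytes left uncopied */
    }

The assembly paths below (ia32entry.S, copy_user_64.S, getuser.S, putuser.S) apply the same bracket with the ASM_STAC/ASM_CLAC macro forms, and the exception/interrupt entry points in entry_64.S clear AC with ASM_CLAC on kernel entry.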

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Link: http://lkml.kernel.org/r/1348256595-29119-9-git-send-email-hpa@linux.intel.com

Showing 14 changed files with 106 additions and 32 deletions

arch/x86/ia32/ia32entry.S
... ... @@ -14,6 +14,7 @@
14 14 #include <asm/segment.h>
15 15 #include <asm/irqflags.h>
16 16 #include <asm/asm.h>
  17 +#include <asm/smap.h>
17 18 #include <linux/linkage.h>
18 19 #include <linux/err.h>
19 20  
20 21  
... ... @@ -146,8 +147,10 @@
146 147 SAVE_ARGS 0,1,0
147 148 /* no need to do an access_ok check here because rbp has been
148 149 32bit zero extended */
  150 + ASM_STAC
149 151 1: movl (%rbp),%ebp
150 152 _ASM_EXTABLE(1b,ia32_badarg)
  153 + ASM_CLAC
151 154 orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
152 155 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
153 156 CFI_REMEMBER_STATE
154 157  
... ... @@ -301,8 +304,10 @@
301 304 /* no need to do an access_ok check here because r8 has been
302 305 32bit zero extended */
303 306 /* hardware stack frame is complete now */
  307 + ASM_STAC
304 308 1: movl (%r8),%r9d
305 309 _ASM_EXTABLE(1b,ia32_badarg)
  310 + ASM_CLAC
306 311 orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
307 312 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
308 313 CFI_REMEMBER_STATE
... ... @@ -365,6 +370,7 @@
365 370 END(ia32_cstar_target)
366 371  
367 372 ia32_badarg:
  373 + ASM_CLAC
368 374 movq $-EFAULT,%rax
369 375 jmp ia32_sysret
370 376 CFI_ENDPROC
arch/x86/include/asm/fpu-internal.h
... ... @@ -126,8 +126,9 @@
126 126  
127 127 /* See comment in fxsave() below. */
128 128 #ifdef CONFIG_AS_FXSAVEQ
129   - asm volatile("1: fxsaveq %[fx]\n\t"
130   - "2:\n"
  129 + asm volatile(ASM_STAC "\n"
  130 + "1: fxsaveq %[fx]\n\t"
  131 + "2: " ASM_CLAC "\n"
131 132 ".section .fixup,\"ax\"\n"
132 133 "3: movl $-1,%[err]\n"
133 134 " jmp 2b\n"
... ... @@ -136,8 +137,9 @@
136 137 : [err] "=r" (err), [fx] "=m" (*fx)
137 138 : "0" (0));
138 139 #else
139   - asm volatile("1: rex64/fxsave (%[fx])\n\t"
140   - "2:\n"
  140 + asm volatile(ASM_STAC "\n"
  141 + "1: rex64/fxsave (%[fx])\n\t"
  142 + "2: " ASM_CLAC "\n"
141 143 ".section .fixup,\"ax\"\n"
142 144 "3: movl $-1,%[err]\n"
143 145 " jmp 2b\n"
arch/x86/include/asm/futex.h
... ... @@ -9,10 +9,13 @@
9 9 #include <asm/asm.h>
10 10 #include <asm/errno.h>
11 11 #include <asm/processor.h>
  12 +#include <asm/smap.h>
12 13  
13 14 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
14   - asm volatile("1:\t" insn "\n" \
15   - "2:\t.section .fixup,\"ax\"\n" \
  15 + asm volatile("\t" ASM_STAC "\n" \
  16 + "1:\t" insn "\n" \
  17 + "2:\t" ASM_CLAC "\n" \
  18 + "\t.section .fixup,\"ax\"\n" \
16 19 "3:\tmov\t%3, %1\n" \
17 20 "\tjmp\t2b\n" \
18 21 "\t.previous\n" \
19 22  
... ... @@ -21,12 +24,14 @@
21 24 : "i" (-EFAULT), "0" (oparg), "1" (0))
22 25  
23 26 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
24   - asm volatile("1:\tmovl %2, %0\n" \
  27 + asm volatile("\t" ASM_STAC "\n" \
  28 + "1:\tmovl %2, %0\n" \
25 29 "\tmovl\t%0, %3\n" \
26 30 "\t" insn "\n" \
27 31 "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
28 32 "\tjnz\t1b\n" \
29   - "3:\t.section .fixup,\"ax\"\n" \
  33 + "3:\t" ASM_CLAC "\n" \
  34 + "\t.section .fixup,\"ax\"\n" \
30 35 "4:\tmov\t%5, %1\n" \
31 36 "\tjmp\t3b\n" \
32 37 "\t.previous\n" \
... ... @@ -122,8 +127,10 @@
122 127 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
123 128 return -EFAULT;
124 129  
125   - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
126   - "2:\t.section .fixup, \"ax\"\n"
  130 + asm volatile("\t" ASM_STAC "\n"
  131 + "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
  132 + "2:\t" ASM_CLAC "\n"
  133 + "\t.section .fixup, \"ax\"\n"
127 134 "3:\tmov %3, %0\n"
128 135 "\tjmp 2b\n"
129 136 "\t.previous\n"
arch/x86/include/asm/smap.h
... ... @@ -58,13 +58,13 @@
58 58  
59 59 #ifdef CONFIG_X86_SMAP
60 60  
61   -static inline void clac(void)
  61 +static __always_inline void clac(void)
62 62 {
63 63 /* Note: a barrier is implicit in alternative() */
64 64 alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
65 65 }
66 66  
67   -static inline void stac(void)
  67 +static __always_inline void stac(void)
68 68 {
69 69 /* Note: a barrier is implicit in alternative() */
70 70 alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
arch/x86/include/asm/uaccess.h
... ... @@ -9,6 +9,7 @@
9 9 #include <linux/string.h>
10 10 #include <asm/asm.h>
11 11 #include <asm/page.h>
  12 +#include <asm/smap.h>
12 13  
13 14 #define VERIFY_READ 0
14 15 #define VERIFY_WRITE 1
15 16  
... ... @@ -192,9 +193,10 @@
192 193  
193 194 #ifdef CONFIG_X86_32
194 195 #define __put_user_asm_u64(x, addr, err, errret) \
195   - asm volatile("1: movl %%eax,0(%2)\n" \
  196 + asm volatile(ASM_STAC "\n" \
  197 + "1: movl %%eax,0(%2)\n" \
196 198 "2: movl %%edx,4(%2)\n" \
197   - "3:\n" \
  199 + "3: " ASM_CLAC "\n" \
198 200 ".section .fixup,\"ax\"\n" \
199 201 "4: movl %3,%0\n" \
200 202 " jmp 3b\n" \
201 203  
... ... @@ -205,9 +207,10 @@
205 207 : "A" (x), "r" (addr), "i" (errret), "0" (err))
206 208  
207 209 #define __put_user_asm_ex_u64(x, addr) \
208   - asm volatile("1: movl %%eax,0(%1)\n" \
  210 + asm volatile(ASM_STAC "\n" \
  211 + "1: movl %%eax,0(%1)\n" \
209 212 "2: movl %%edx,4(%1)\n" \
210   - "3:\n" \
  213 + "3: " ASM_CLAC "\n" \
211 214 _ASM_EXTABLE_EX(1b, 2b) \
212 215 _ASM_EXTABLE_EX(2b, 3b) \
213 216 : : "A" (x), "r" (addr))
... ... @@ -379,8 +382,9 @@
379 382 } while (0)
380 383  
381 384 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
382   - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
383   - "2:\n" \
  385 + asm volatile(ASM_STAC "\n" \
  386 + "1: mov"itype" %2,%"rtype"1\n" \
  387 + "2: " ASM_CLAC "\n" \
384 388 ".section .fixup,\"ax\"\n" \
385 389 "3: mov %3,%0\n" \
386 390 " xor"itype" %"rtype"1,%"rtype"1\n" \
... ... @@ -412,8 +416,9 @@
412 416 } while (0)
413 417  
414 418 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
415   - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
416   - "2:\n" \
  419 + asm volatile(ASM_STAC "\n" \
  420 + "1: mov"itype" %1,%"rtype"0\n" \
  421 + "2: " ASM_CLAC "\n" \
417 422 _ASM_EXTABLE_EX(1b, 2b) \
418 423 : ltype(x) : "m" (__m(addr)))
419 424  
... ... @@ -443,8 +448,9 @@
443 448 * aliasing issues.
444 449 */
445 450 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
446   - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
447   - "2:\n" \
  451 + asm volatile(ASM_STAC "\n" \
  452 + "1: mov"itype" %"rtype"1,%2\n" \
  453 + "2: " ASM_CLAC "\n" \
448 454 ".section .fixup,\"ax\"\n" \
449 455 "3: mov %3,%0\n" \
450 456 " jmp 2b\n" \
... ... @@ -454,8 +460,9 @@
454 460 : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
455 461  
456 462 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
457   - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
458   - "2:\n" \
  463 + asm volatile(ASM_STAC "\n" \
  464 + "1: mov"itype" %"rtype"0,%1\n" \
  465 + "2: " ASM_CLAC "\n" \
459 466 _ASM_EXTABLE_EX(1b, 2b) \
460 467 : : ltype(x), "m" (__m(addr)))
461 468  
arch/x86/include/asm/xsave.h
... ... @@ -74,8 +74,9 @@
74 74 if (unlikely(err))
75 75 return -EFAULT;
76 76  
77   - __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
78   - "2:\n"
  77 + __asm__ __volatile__(ASM_STAC "\n"
  78 + "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
  79 + "2: " ASM_CLAC "\n"
79 80 ".section .fixup,\"ax\"\n"
80 81 "3: movl $-1,%[err]\n"
81 82 " jmp 2b\n"
... ... @@ -97,8 +98,9 @@
97 98 u32 lmask = mask;
98 99 u32 hmask = mask >> 32;
99 100  
100   - __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
101   - "2:\n"
  101 + __asm__ __volatile__(ASM_STAC "\n"
  102 + "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
  103 + "2: " ASM_CLAC "\n"
102 104 ".section .fixup,\"ax\"\n"
103 105 "3: movl $-1,%[err]\n"
104 106 " jmp 2b\n"
arch/x86/kernel/cpu/common.c
... ... @@ -1113,7 +1113,8 @@
1113 1113  
1114 1114 /* Flags to clear on syscall */
1115 1115 wrmsrl(MSR_SYSCALL_MASK,
1116   - X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
  1116 + X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
  1117 + X86_EFLAGS_IOPL|X86_EFLAGS_AC);
1117 1118 }
1118 1119  
1119 1120 unsigned long kernel_eflags;
arch/x86/kernel/entry_64.S
... ... @@ -56,6 +56,7 @@
56 56 #include <asm/ftrace.h>
57 57 #include <asm/percpu.h>
58 58 #include <asm/asm.h>
  59 +#include <asm/smap.h>
59 60 #include <linux/err.h>
60 61  
61 62 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
... ... @@ -465,7 +466,8 @@
465 466 * System call entry. Up to 6 arguments in registers are supported.
466 467 *
467 468 * SYSCALL does not save anything on the stack and does not change the
468   - * stack pointer.
  469 + * stack pointer. However, it does mask the flags register for us, so
  470 + * CLD and CLAC are not needed.
469 471 */
470 472  
471 473 /*
... ... @@ -884,6 +886,7 @@
884 886 */
885 887 .p2align CONFIG_X86_L1_CACHE_SHIFT
886 888 common_interrupt:
  889 + ASM_CLAC
887 890 XCPT_FRAME
888 891 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
889 892 interrupt do_IRQ
... ... @@ -1023,6 +1026,7 @@
1023 1026 */
1024 1027 .macro apicinterrupt num sym do_sym
1025 1028 ENTRY(\sym)
  1029 + ASM_CLAC
1026 1030 INTR_FRAME
1027 1031 pushq_cfi $~(\num)
1028 1032 .Lcommon_\sym:
... ... @@ -1077,6 +1081,7 @@
1077 1081 */
1078 1082 .macro zeroentry sym do_sym
1079 1083 ENTRY(\sym)
  1084 + ASM_CLAC
1080 1085 INTR_FRAME
1081 1086 PARAVIRT_ADJUST_EXCEPTION_FRAME
1082 1087 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
... ... @@ -1094,6 +1099,7 @@
1094 1099  
1095 1100 .macro paranoidzeroentry sym do_sym
1096 1101 ENTRY(\sym)
  1102 + ASM_CLAC
1097 1103 INTR_FRAME
1098 1104 PARAVIRT_ADJUST_EXCEPTION_FRAME
1099 1105 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
... ... @@ -1112,6 +1118,7 @@
1112 1118 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
1113 1119 .macro paranoidzeroentry_ist sym do_sym ist
1114 1120 ENTRY(\sym)
  1121 + ASM_CLAC
1115 1122 INTR_FRAME
1116 1123 PARAVIRT_ADJUST_EXCEPTION_FRAME
1117 1124 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
... ... @@ -1131,6 +1138,7 @@
1131 1138  
1132 1139 .macro errorentry sym do_sym
1133 1140 ENTRY(\sym)
  1141 + ASM_CLAC
1134 1142 XCPT_FRAME
1135 1143 PARAVIRT_ADJUST_EXCEPTION_FRAME
1136 1144 subq $ORIG_RAX-R15, %rsp
... ... @@ -1149,6 +1157,7 @@
1149 1157 /* error code is on the stack already */
1150 1158 .macro paranoiderrorentry sym do_sym
1151 1159 ENTRY(\sym)
  1160 + ASM_CLAC
1152 1161 XCPT_FRAME
1153 1162 PARAVIRT_ADJUST_EXCEPTION_FRAME
1154 1163 subq $ORIG_RAX-R15, %rsp
arch/x86/lib/copy_user_64.S
... ... @@ -17,6 +17,7 @@
17 17 #include <asm/cpufeature.h>
18 18 #include <asm/alternative-asm.h>
19 19 #include <asm/asm.h>
  20 +#include <asm/smap.h>
20 21  
21 22 /*
22 23 * By placing feature2 after feature1 in altinstructions section, we logically
... ... @@ -130,6 +131,7 @@
130 131 */
131 132 ENTRY(copy_user_generic_unrolled)
132 133 CFI_STARTPROC
  134 + ASM_STAC
133 135 cmpl $8,%edx
134 136 jb 20f /* less then 8 bytes, go to byte copy loop */
135 137 ALIGN_DESTINATION
... ... @@ -177,6 +179,7 @@
177 179 decl %ecx
178 180 jnz 21b
179 181 23: xor %eax,%eax
  182 + ASM_CLAC
180 183 ret
181 184  
182 185 .section .fixup,"ax"
... ... @@ -232,6 +235,7 @@
232 235 */
233 236 ENTRY(copy_user_generic_string)
234 237 CFI_STARTPROC
  238 + ASM_STAC
235 239 andl %edx,%edx
236 240 jz 4f
237 241 cmpl $8,%edx
... ... @@ -246,6 +250,7 @@
246 250 3: rep
247 251 movsb
248 252 4: xorl %eax,%eax
  253 + ASM_CLAC
249 254 ret
250 255  
251 256 .section .fixup,"ax"
252 257  
... ... @@ -273,12 +278,14 @@
273 278 */
274 279 ENTRY(copy_user_enhanced_fast_string)
275 280 CFI_STARTPROC
  281 + ASM_STAC
276 282 andl %edx,%edx
277 283 jz 2f
278 284 movl %edx,%ecx
279 285 1: rep
280 286 movsb
281 287 2: xorl %eax,%eax
  288 + ASM_CLAC
282 289 ret
283 290  
284 291 .section .fixup,"ax"
arch/x86/lib/copy_user_nocache_64.S
... ... @@ -15,6 +15,7 @@
15 15 #include <asm/asm-offsets.h>
16 16 #include <asm/thread_info.h>
17 17 #include <asm/asm.h>
  18 +#include <asm/smap.h>
18 19  
19 20 .macro ALIGN_DESTINATION
20 21 #ifdef FIX_ALIGNMENT
... ... @@ -48,6 +49,7 @@
48 49 */
49 50 ENTRY(__copy_user_nocache)
50 51 CFI_STARTPROC
  52 + ASM_STAC
51 53 cmpl $8,%edx
52 54 jb 20f /* less then 8 bytes, go to byte copy loop */
53 55 ALIGN_DESTINATION
... ... @@ -95,6 +97,7 @@
95 97 decl %ecx
96 98 jnz 21b
97 99 23: xorl %eax,%eax
  100 + ASM_CLAC
98 101 sfence
99 102 ret
100 103  
arch/x86/lib/getuser.S
... ... @@ -33,6 +33,7 @@
33 33 #include <asm/asm-offsets.h>
34 34 #include <asm/thread_info.h>
35 35 #include <asm/asm.h>
  36 +#include <asm/smap.h>
36 37  
37 38 .text
38 39 ENTRY(__get_user_1)
39 40  
... ... @@ -40,8 +41,10 @@
40 41 GET_THREAD_INFO(%_ASM_DX)
41 42 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
42 43 jae bad_get_user
  44 + ASM_STAC
43 45 1: movzb (%_ASM_AX),%edx
44 46 xor %eax,%eax
  47 + ASM_CLAC
45 48 ret
46 49 CFI_ENDPROC
47 50 ENDPROC(__get_user_1)
48 51  
... ... @@ -53,8 +56,10 @@
53 56 GET_THREAD_INFO(%_ASM_DX)
54 57 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
55 58 jae bad_get_user
  59 + ASM_STAC
56 60 2: movzwl -1(%_ASM_AX),%edx
57 61 xor %eax,%eax
  62 + ASM_CLAC
58 63 ret
59 64 CFI_ENDPROC
60 65 ENDPROC(__get_user_2)
61 66  
... ... @@ -66,8 +71,10 @@
66 71 GET_THREAD_INFO(%_ASM_DX)
67 72 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
68 73 jae bad_get_user
  74 + ASM_STAC
69 75 3: mov -3(%_ASM_AX),%edx
70 76 xor %eax,%eax
  77 + ASM_CLAC
71 78 ret
72 79 CFI_ENDPROC
73 80 ENDPROC(__get_user_4)
74 81  
... ... @@ -80,8 +87,10 @@
80 87 GET_THREAD_INFO(%_ASM_DX)
81 88 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
82 89 jae bad_get_user
  90 + ASM_STAC
83 91 4: movq -7(%_ASM_AX),%_ASM_DX
84 92 xor %eax,%eax
  93 + ASM_CLAC
85 94 ret
86 95 CFI_ENDPROC
87 96 ENDPROC(__get_user_8)
... ... @@ -91,6 +100,7 @@
91 100 CFI_STARTPROC
92 101 xor %edx,%edx
93 102 mov $(-EFAULT),%_ASM_AX
  103 + ASM_CLAC
94 104 ret
95 105 CFI_ENDPROC
96 106 END(bad_get_user)
arch/x86/lib/putuser.S
... ... @@ -15,6 +15,7 @@
15 15 #include <asm/thread_info.h>
16 16 #include <asm/errno.h>
17 17 #include <asm/asm.h>
  18 +#include <asm/smap.h>
18 19  
19 20  
20 21 /*
... ... @@ -31,7 +32,8 @@
31 32  
32 33 #define ENTER CFI_STARTPROC ; \
33 34 GET_THREAD_INFO(%_ASM_BX)
34   -#define EXIT ret ; \
  35 +#define EXIT ASM_CLAC ; \
  36 + ret ; \
35 37 CFI_ENDPROC
36 38  
37 39 .text
... ... @@ -39,6 +41,7 @@
39 41 ENTER
40 42 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
41 43 jae bad_put_user
  44 + ASM_STAC
42 45 1: movb %al,(%_ASM_CX)
43 46 xor %eax,%eax
44 47 EXIT
... ... @@ -50,6 +53,7 @@
50 53 sub $1,%_ASM_BX
51 54 cmp %_ASM_BX,%_ASM_CX
52 55 jae bad_put_user
  56 + ASM_STAC
53 57 2: movw %ax,(%_ASM_CX)
54 58 xor %eax,%eax
55 59 EXIT
... ... @@ -61,6 +65,7 @@
61 65 sub $3,%_ASM_BX
62 66 cmp %_ASM_BX,%_ASM_CX
63 67 jae bad_put_user
  68 + ASM_STAC
64 69 3: movl %eax,(%_ASM_CX)
65 70 xor %eax,%eax
66 71 EXIT
... ... @@ -72,6 +77,7 @@
72 77 sub $7,%_ASM_BX
73 78 cmp %_ASM_BX,%_ASM_CX
74 79 jae bad_put_user
  80 + ASM_STAC
75 81 4: mov %_ASM_AX,(%_ASM_CX)
76 82 #ifdef CONFIG_X86_32
77 83 5: movl %edx,4(%_ASM_CX)
arch/x86/lib/usercopy_32.c
... ... @@ -42,10 +42,11 @@
42 42 int __d0; \
43 43 might_fault(); \
44 44 __asm__ __volatile__( \
  45 + ASM_STAC "\n" \
45 46 "0: rep; stosl\n" \
46 47 " movl %2,%0\n" \
47 48 "1: rep; stosb\n" \
48   - "2:\n" \
  49 + "2: " ASM_CLAC "\n" \
49 50 ".section .fixup,\"ax\"\n" \
50 51 "3: lea 0(%2,%0,4),%0\n" \
51 52 " jmp 2b\n" \
52 53  
... ... @@ -626,10 +627,12 @@
626 627 return n;
627 628 }
628 629 #endif
  630 + stac();
629 631 if (movsl_is_ok(to, from, n))
630 632 __copy_user(to, from, n);
631 633 else
632 634 n = __copy_user_intel(to, from, n);
  635 + clac();
633 636 return n;
634 637 }
635 638 EXPORT_SYMBOL(__copy_to_user_ll);
636 639  
... ... @@ -637,10 +640,12 @@
637 640 unsigned long __copy_from_user_ll(void *to, const void __user *from,
638 641 unsigned long n)
639 642 {
  643 + stac();
640 644 if (movsl_is_ok(to, from, n))
641 645 __copy_user_zeroing(to, from, n);
642 646 else
643 647 n = __copy_user_zeroing_intel(to, from, n);
  648 + clac();
644 649 return n;
645 650 }
646 651 EXPORT_SYMBOL(__copy_from_user_ll);
647 652  
... ... @@ -648,11 +653,13 @@
648 653 unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
649 654 unsigned long n)
650 655 {
  656 + stac();
651 657 if (movsl_is_ok(to, from, n))
652 658 __copy_user(to, from, n);
653 659 else
654 660 n = __copy_user_intel((void __user *)to,
655 661 (const void *)from, n);
  662 + clac();
656 663 return n;
657 664 }
658 665 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
... ... @@ -660,6 +667,7 @@
660 667 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
661 668 unsigned long n)
662 669 {
  670 + stac();
663 671 #ifdef CONFIG_X86_INTEL_USERCOPY
664 672 if (n > 64 && cpu_has_xmm2)
665 673 n = __copy_user_zeroing_intel_nocache(to, from, n);
... ... @@ -668,6 +676,7 @@
668 676 #else
669 677 __copy_user_zeroing(to, from, n);
670 678 #endif
  679 + clac();
671 680 return n;
672 681 }
673 682 EXPORT_SYMBOL(__copy_from_user_ll_nocache);
... ... @@ -675,6 +684,7 @@
675 684 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
676 685 unsigned long n)
677 686 {
  687 + stac();
678 688 #ifdef CONFIG_X86_INTEL_USERCOPY
679 689 if (n > 64 && cpu_has_xmm2)
680 690 n = __copy_user_intel_nocache(to, from, n);
... ... @@ -683,6 +693,7 @@
683 693 #else
684 694 __copy_user(to, from, n);
685 695 #endif
  696 + clac();
686 697 return n;
687 698 }
688 699 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
arch/x86/lib/usercopy_64.c
... ... @@ -18,6 +18,7 @@
18 18 might_fault();
19 19 /* no memory constraint because it doesn't change any memory gcc knows
20 20 about */
  21 + stac();
21 22 asm volatile(
22 23 " testq %[size8],%[size8]\n"
23 24 " jz 4f\n"
... ... @@ -40,6 +41,7 @@
40 41 : [size8] "=&c"(size), [dst] "=&D" (__d0)
41 42 : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
42 43 [zero] "r" (0UL), [eight] "r" (8UL));
  44 + clac();
43 45 return size;
44 46 }
45 47 EXPORT_SYMBOL(__clear_user);
... ... @@ -82,6 +84,7 @@
82 84 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
83 85 if (__put_user_nocheck(c, to++, sizeof(char)))
84 86 break;
  87 + clac();
85 88 return len;
86 89 }