Merge branch 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86/smap support from Ingo Molnar:
 "This adds support for the SMAP (Supervisor Mode Access Prevention) CPU
  feature on Intel CPUs: a hardware feature that prevents unintended
  user-space data access from kernel privileged code.

  It's turned on automatically when possible.

  This, in combination with SMEP, makes it even harder to exploit kernel
  bugs such as NULL pointer dereferences."

Fix up trivial conflict in arch/x86/kernel/entry_64.S due to newly added
includes right next to each other.
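
For readers unfamiliar with the feature, here is a minimal sketch of the bug class SMAP targets (hypothetical driver code, not part of this merge): once CR4.SMAP is set, a stray kernel dereference of a pointer an attacker aimed at user space faults immediately instead of quietly reading attacker-controlled data, while SMEP separately blocks executing from user pages.

    /* Hypothetical example, not from this merge: a kernel path that trusts
     * a pointer an attacker can point into user space. */
    struct widget_ops {
            void (*go)(void);
    };

    void buggy_path(struct widget_ops *ops)
    {
            /* With SMAP on, this *read* of ops->go faults (and oopses) if
             * ops points at user memory, instead of fetching an
             * attacker-chosen function pointer; SMEP would additionally
             * block jumping to a user-space target. */
            ops->go();
    }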

* 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, smep, smap: Make the switching functions one-way
  x86, suspend: On wakeup always initialize cr4 and EFER
  x86-32: Start out eflags and cr4 clean
  x86, smap: Do not abuse the [f][x]rstor_checking() functions for user space
  x86-32, smap: Add STAC/CLAC instructions to 32-bit kernel entry
  x86, smap: Reduce the SMAP overhead for signal handling
  x86, smap: A page fault due to SMAP is an oops
  x86, smap: Turn on Supervisor Mode Access Prevention
  x86, smap: Add STAC and CLAC instructions to control user space access
  x86, uaccess: Merge prototypes for clear_user/__clear_user
  x86, smap: Add a header file with macros for STAC/CLAC
  x86, alternative: Add header guards to <asm/alternative-asm.h>
  x86, alternative: Use .pushsection/.popsection
  x86, smap: Add CR4 bit for SMAP
  x86-32, mm: The WP test should be done on a kernel page
commit 15385dfe7e0fa6866b204dd0d14aec2cc48fc0a7 2 parents a57d985 + b2cc2a0
Linus Torvalds authored October 01, 2012

Showing 31 changed files with 410 additions and 116 deletions.

  1. 6  Documentation/kernel-parameters.txt
  2. 11  arch/x86/Kconfig
  3. 13  arch/x86/ia32/ia32_signal.c
  4. 6  arch/x86/ia32/ia32entry.S
  5. 9  arch/x86/include/asm/alternative-asm.h
  6. 32  arch/x86/include/asm/alternative.h
  7. 42  arch/x86/include/asm/fpu-internal.h
  8. 19  arch/x86/include/asm/futex.h
  9. 1  arch/x86/include/asm/processor-flags.h
  10. 91  arch/x86/include/asm/smap.h
  11. 28  arch/x86/include/asm/uaccess.h
  12. 3  arch/x86/include/asm/uaccess_32.h
  13. 3  arch/x86/include/asm/uaccess_64.h
  14. 10  arch/x86/include/asm/xsave.h
  15. 15  arch/x86/kernel/acpi/sleep.c
  16. 44  arch/x86/kernel/cpu/common.c
  17. 26  arch/x86/kernel/entry_32.S
  18. 11  arch/x86/kernel/entry_64.S
  19. 31  arch/x86/kernel/head_32.S
  20. 24  arch/x86/kernel/signal.c
  21. 6  arch/x86/kernel/xsave.c
  22. 7  arch/x86/lib/copy_user_64.S
  23. 3  arch/x86/lib/copy_user_nocache_64.S
  24. 10  arch/x86/lib/getuser.S
  25. 8  arch/x86/lib/putuser.S
  26. 13  arch/x86/lib/usercopy_32.c
  27. 3  arch/x86/lib/usercopy_64.c
  28. 18  arch/x86/mm/fault.c
  29. 2  arch/x86/mm/init_32.c
  30. 2  arch/x86/realmode/rm/wakeup.h
  31. 29  arch/x86/realmode/rm/wakeup_asm.S
6  Documentation/kernel-parameters.txt
@@ -1812,8 +1812,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			noexec=on: enable non-executable mappings (default)
 			noexec=off: disable non-executable mappings
 
+	nosmap		[X86]
+			Disable SMAP (Supervisor Mode Access Prevention)
+			even if it is supported by processor.
+
 	nosmep		[X86]
-			Disable SMEP (Supervisor Mode Execution Protection)
+			Disable SMEP (Supervisor Mode Execution Prevention)
 			even if it is supported by processor.
 
 	noexec32	[X86-64]
11  arch/x86/Kconfig
@@ -1493,6 +1493,17 @@ config ARCH_RANDOM
 	  If supported, this is a high bandwidth, cryptographically
 	  secure hardware random number generator.
 
+config X86_SMAP
+	def_bool y
+	prompt "Supervisor Mode Access Prevention" if EXPERT
+	---help---
+	  Supervisor Mode Access Prevention (SMAP) is a security
+	  feature in newer Intel processors.  There is a small
+	  performance cost if this enabled and turned on; there is
+	  also a small increase in the kernel size if this is enabled.
+
+	  If unsure, say Y.
+
 config EFI
 	bool "EFI runtime service support"
 	depends on ACPI
13  arch/x86/ia32/ia32_signal.c
@@ -32,6 +32,7 @@
 #include <asm/sigframe.h>
 #include <asm/sighandling.h>
 #include <asm/sys_ia32.h>
+#include <asm/smap.h>
 
 #define FIX_EFLAGS	__FIX_EFLAGS
 
@@ -251,11 +252,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 
 		get_user_ex(tmp, &sc->fpstate);
 		buf = compat_ptr(tmp);
-		err |= restore_xstate_sig(buf, 1);
 
 		get_user_ex(*pax, &sc->ax);
 	} get_user_catch(err);
 
+	err |= restore_xstate_sig(buf, 1);
+
 	return err;
 }
 
@@ -506,7 +508,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sig, &frame->sig);
 		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
 		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
-		err |= copy_siginfo_to_user32(&frame->info, info);
 
 		/* Create the ucontext.  */
 		if (cpu_has_xsave)
@@ -518,9 +519,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sas_ss_flags(regs->sp),
 			    &frame->uc.uc_stack.ss_flags);
 		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-		err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-					     regs, set->sig[0]);
-		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 		if (ka->sa.sa_flags & SA_RESTORER)
 			restorer = ka->sa.sa_restorer;
@@ -536,6 +534,11 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
 	} put_user_catch(err);
 
+	err |= copy_siginfo_to_user32(&frame->info, info);
+	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+				     regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
 	if (err)
 		return -EFAULT;
 
6  arch/x86/ia32/ia32entry.S
@@ -14,6 +14,7 @@
 #include <asm/segment.h>
 #include <asm/irqflags.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
 
@@ -146,8 +147,10 @@ ENTRY(ia32_sysenter_target)
 	SAVE_ARGS 0,1,0
 	/* no need to do an access_ok check here because rbp has been
 	   32bit zero extended */
+	ASM_STAC
 1:	movl	(%rbp),%ebp
 	_ASM_EXTABLE(1b,ia32_badarg)
+	ASM_CLAC
 	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	CFI_REMEMBER_STATE
@@ -301,8 +304,10 @@ ENTRY(ia32_cstar_target)
 	/* no need to do an access_ok check here because r8 has been
 	   32bit zero extended */
 	/* hardware stack frame is complete now */
+	ASM_STAC
 1:	movl	(%r8),%r9d
 	_ASM_EXTABLE(1b,ia32_badarg)
+	ASM_CLAC
 	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	CFI_REMEMBER_STATE
@@ -365,6 +370,7 @@ cstar_tracesys:
 END(ia32_cstar_target)
 
 ia32_badarg:
+	ASM_CLAC
 	movq $-EFAULT,%rax
 	jmp ia32_sysret
 	CFI_ENDPROC
9  arch/x86/include/asm/alternative-asm.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_X86_ALTERNATIVE_ASM_H
+#define _ASM_X86_ALTERNATIVE_ASM_H
+
 #ifdef __ASSEMBLY__
 
 #include <asm/asm.h>
@@ -5,10 +8,10 @@
 #ifdef CONFIG_SMP
 	.macro LOCK_PREFIX
 672:	lock
-	.section .smp_locks,"a"
+	.pushsection .smp_locks,"a"
 	.balign 4
 	.long 672b - .
-	.previous
+	.popsection
 	.endm
 #else
 	.macro LOCK_PREFIX
@@ -24,3 +27,5 @@
 .endm
 
 #endif  /*  __ASSEMBLY__  */
+
+#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
32  arch/x86/include/asm/alternative.h
@@ -29,10 +29,10 @@
 
 #ifdef CONFIG_SMP
 #define LOCK_PREFIX_HERE \
-		".section .smp_locks,\"a\"\n"	\
-		".balign 4\n"			\
-		".long 671f - .\n" /* offset */	\
-		".previous\n"			\
+		".pushsection .smp_locks,\"a\"\n"	\
+		".balign 4\n"				\
+		".long 671f - .\n" /* offset */		\
+		".popsection\n"				\
 		"671:"
 
 #define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
@@ -99,30 +99,30 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /* alternative assembly primitive: */
 #define ALTERNATIVE(oldinstr, newinstr, feature)			\
 	OLDINSTR(oldinstr)						\
-	".section .altinstructions,\"a\"\n"				\
+	".pushsection .altinstructions,\"a\"\n"				\
 	ALTINSTR_ENTRY(feature, 1)					\
-	".previous\n"							\
-	".section .discard,\"aw\",@progbits\n"				\
+	".popsection\n"							\
+	".pushsection .discard,\"aw\",@progbits\n"			\
 	DISCARD_ENTRY(1)						\
-	".previous\n"							\
-	".section .altinstr_replacement, \"ax\"\n"			\
+	".popsection\n"							\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
 	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
-	".previous"
+	".popsection"
 
 #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
 	OLDINSTR(oldinstr)						\
-	".section .altinstructions,\"a\"\n"				\
+	".pushsection .altinstructions,\"a\"\n"				\
 	ALTINSTR_ENTRY(feature1, 1)					\
 	ALTINSTR_ENTRY(feature2, 2)					\
-	".previous\n"							\
-	".section .discard,\"aw\",@progbits\n"				\
+	".popsection\n"							\
+	".pushsection .discard,\"aw\",@progbits\n"			\
 	DISCARD_ENTRY(1)						\
 	DISCARD_ENTRY(2)						\
-	".previous\n"							\
-	".section .altinstr_replacement, \"ax\"\n"			\
+	".popsection\n"							\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
 	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
 	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
-	".previous"
+	".popsection"
 
 /*
  * This must be included *after* the definition of ALTERNATIVE due to
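
Aside: the .section/.previous to .pushsection/.popsection conversion above is what lets these macros nest safely, since .previous only remembers a single prior section while push/pop maintain a real stack. A toy illustration with made-up section usage (a sketch, not kernel code):

    /* Toy sketch: the inner switch (think LOCK_PREFIX expanded inside an
     * ALTERNATIVE body) pops back to the *outer* section, not to whatever
     * section the assembler was in before both switches. */
    asm(".pushsection .rodata\n\t"          /* outer macro switches section */
        ".pushsection .smp_locks,\"a\"\n\t" /* inner macro switches again   */
        ".popsection\n\t"                   /* back to .rodata              */
        ".popsection");                     /* back to the original section */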
42  arch/x86/include/asm/fpu-internal.h
@@ -21,6 +21,7 @@
 #include <asm/user.h>
 #include <asm/uaccess.h>
 #include <asm/xsave.h>
+#include <asm/smap.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/sigcontext32.h>
@@ -121,6 +122,22 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 	__sanitize_i387_state(tsk);
 }
 
+#define user_insn(insn, output, input...)				\
+({									\
+	int err;							\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:" #insn "\n\t"					\
+		     "2: " ASM_CLAC "\n"				\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
 #define check_insn(insn, output, input...)				\
 ({									\
 	int err;							\
@@ -138,18 +155,18 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
 
 static inline int fsave_user(struct i387_fsave_struct __user *fx)
 {
-	return check_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
+	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
 }
 
 static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
 	if (config_enabled(CONFIG_X86_32))
-		return check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
+		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
-		return check_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
+		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
 
 	/* See comment in fpu_fxsave() below. */
-	return check_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
+	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
 }
 
 static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
@@ -164,11 +181,28 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 			  "m" (*fx));
 }
 
+static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
+{
+	if (config_enabled(CONFIG_X86_32))
+		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+	else if (config_enabled(CONFIG_AS_FXSAVEQ))
+		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+	/* See comment in fpu_fxsave() below. */
+	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+			  "m" (*fx));
+}
+
 static inline int frstor_checking(struct i387_fsave_struct *fx)
 {
 	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
+static inline int frstor_user(struct i387_fsave_struct __user *fx)
+{
+	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
 static inline void fpu_fxsave(struct fpu *fpu)
 {
 	if (config_enabled(CONFIG_X86_32))
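
The new user_insn() wrapper exists because these save/restore instructions now target __user buffers (the signal frame), so the faulting instruction has to sit between STAC and CLAC; check_insn() stays for kernel-internal buffers. A hypothetical caller, only to show the intended split (the function name is illustrative, not from this diff):

    /* Illustrative only: dump the FPU state into a user-space sigframe.
     * fxsave_user() -> user_insn() emits STAC ... fxsave ... CLAC and
     * returns 0 on success or -1 if the store faulted. */
    static int save_fpu_to_sigframe(struct i387_fxsave_struct __user *buf)
    {
            if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
                    return -EFAULT;

            return fxsave_user(buf);
    }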
19  arch/x86/include/asm/futex.h
@@ -9,10 +9,13 @@
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/processor.h>
+#include <asm/smap.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
-	asm volatile("1:\t" insn "\n"				\
-		     "2:\t.section .fixup,\"ax\"\n"		\
+	asm volatile("\t" ASM_STAC "\n"				\
+		     "1:\t" insn "\n"				\
+		     "2:\t" ASM_CLAC "\n"			\
+		     "\t.section .fixup,\"ax\"\n"		\
 		     "3:\tmov\t%3, %1\n"			\
 		     "\tjmp\t2b\n"				\
 		     "\t.previous\n"				\
@@ -21,12 +24,14 @@
 		     : "i" (-EFAULT), "0" (oparg), "1" (0))
 
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)	\
-	asm volatile("1:\tmovl	%2, %0\n"			\
+	asm volatile("\t" ASM_STAC "\n"				\
+		     "1:\tmovl	%2, %0\n"			\
 		     "\tmovl\t%0, %3\n"				\
 		     "\t" insn "\n"				\
 		     "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"	\
 		     "\tjnz\t1b\n"				\
-		     "3:\t.section .fixup,\"ax\"\n"		\
+		     "3:\t" ASM_CLAC "\n"			\
+		     "\t.section .fixup,\"ax\"\n"		\
 		     "4:\tmov\t%5, %1\n"			\
 		     "\tjmp\t3b\n"				\
 		     "\t.previous\n"				\
@@ -122,8 +127,10 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
-		     "2:\t.section .fixup, \"ax\"\n"
+	asm volatile("\t" ASM_STAC "\n"
+		     "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		     "2:\t" ASM_CLAC "\n"
+		     "\t.section .fixup, \"ax\"\n"
 		     "3:\tmov     %3, %0\n"
 		     "\tjmp     2b\n"
 		     "\t.previous\n"
1  arch/x86/include/asm/processor-flags.h
@@ -65,6 +65,7 @@
 #define X86_CR4_PCIDE	0x00020000 /* enable PCID support */
 #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
 #define X86_CR4_SMEP	0x00100000 /* enable SMEP support */
+#define X86_CR4_SMAP	0x00200000 /* enable SMAP support */
 
 /*
  * x86-64 Task Priority Register, CR8
91  arch/x86/include/asm/smap.h
@@ -0,0 +1,91 @@
+/*
+ * Supervisor Mode Access Prevention support
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: H. Peter Anvin <hpa@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef _ASM_X86_SMAP_H
+#define _ASM_X86_SMAP_H
+
+#include <linux/stringify.h>
+#include <asm/nops.h>
+#include <asm/cpufeature.h>
+
+/* "Raw" instruction opcodes */
+#define __ASM_CLAC	.byte 0x0f,0x01,0xca
+#define __ASM_STAC	.byte 0x0f,0x01,0xcb
+
+#ifdef __ASSEMBLY__
+
+#include <asm/alternative-asm.h>
+
+#ifdef CONFIG_X86_SMAP
+
+#define ASM_CLAC							\
+	661: ASM_NOP3 ;							\
+	.pushsection .altinstr_replacement, "ax" ;			\
+	662: __ASM_CLAC ;						\
+	.popsection ;							\
+	.pushsection .altinstructions, "a" ;				\
+	altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ;	\
+	.popsection
+
+#define ASM_STAC							\
+	661: ASM_NOP3 ;							\
+	.pushsection .altinstr_replacement, "ax" ;			\
+	662: __ASM_STAC ;						\
+	.popsection ;							\
+	.pushsection .altinstructions, "a" ;				\
+	altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ;	\
+	.popsection
+
+#else /* CONFIG_X86_SMAP */
+
+#define ASM_CLAC
+#define ASM_STAC
+
+#endif /* CONFIG_X86_SMAP */
+
+#else /* __ASSEMBLY__ */
+
+#include <asm/alternative.h>
+
+#ifdef CONFIG_X86_SMAP
+
+static __always_inline void clac(void)
+{
+	/* Note: a barrier is implicit in alternative() */
+	alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+}
+
+static __always_inline void stac(void)
+{
+	/* Note: a barrier is implicit in alternative() */
+	alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+}
+
+/* These macros can be used in asm() statements */
+#define ASM_CLAC \
+	ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+#define ASM_STAC \
+	ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+
+#else /* CONFIG_X86_SMAP */
+
+static inline void clac(void) { }
+static inline void stac(void) { }
+
+#define ASM_CLAC
+#define ASM_STAC
+
+#endif /* CONFIG_X86_SMAP */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_SMAP_H */
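
The header provides both asm-visible (ASM_STAC/ASM_CLAC) and C-callable (stac()/clac()) forms, and all of them collapse to a 3-byte NOP at patch time on CPUs without X86_FEATURE_SMAP. A condensed, hypothetical variant of the __put_user_asm() pattern used later in this series shows how they bracket a single faulting user access (function name and constraints are illustrative, not kernel code):

    /* Hypothetical sketch: STAC/CLAC bracket the one faulting store; the
     * .fixup entry records -EFAULT and jumps back to label 2, so CLAC also
     * runs on the fault path and EFLAGS.AC never leaks past the access. */
    static inline int put_user_u32(u32 val, u32 __user *addr)
    {
            int err = 0;

            asm volatile(ASM_STAC "\n"
                         "1:        movl %2,%1\n"
                         "2: " ASM_CLAC "\n"
                         ".section .fixup,\"ax\"\n"
                         "3:        movl %3,%0\n"
                         "        jmp 2b\n"
                         ".previous\n"
                         _ASM_EXTABLE(1b, 3b)
                         : "=r" (err), "=m" (*addr)
                         : "ir" (val), "i" (-EFAULT), "0" (err));

            return err;
    }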
28  arch/x86/include/asm/uaccess.h
@@ -9,6 +9,7 @@
 #include <linux/string.h>
 #include <asm/asm.h>
 #include <asm/page.h>
+#include <asm/smap.h>
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -192,9 +193,10 @@ extern int __get_user_bad(void);
 
 #ifdef CONFIG_X86_32
 #define __put_user_asm_u64(x, addr, err, errret)			\
-	asm volatile("1:	movl %%eax,0(%2)\n"			\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:	movl %%eax,0(%2)\n"			\
 		     "2:	movl %%edx,4(%2)\n"			\
-		     "3:\n"						\
+		     "3: " ASM_CLAC "\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "4:	movl %3,%0\n"				\
 		     "	jmp 3b\n"					\
@@ -205,9 +207,10 @@ extern int __get_user_bad(void);
 		     : "A" (x), "r" (addr), "i" (errret), "0" (err))
 
 #define __put_user_asm_ex_u64(x, addr)					\
-	asm volatile("1:	movl %%eax,0(%1)\n"			\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:	movl %%eax,0(%1)\n"			\
 		     "2:	movl %%edx,4(%1)\n"			\
-		     "3:\n"						\
+		     "3: " ASM_CLAC "\n"				\
 		     _ASM_EXTABLE_EX(1b, 2b)				\
 		     _ASM_EXTABLE_EX(2b, 3b)				\
 		     : : "A" (x), "r" (addr))
@@ -379,8 +382,9 @@ do {									\
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
-		     "2:\n"						\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:	mov"itype" %2,%"rtype"1\n"		\
+		     "2: " ASM_CLAC "\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "3:	mov %3,%0\n"				\
 		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
@@ -443,8 +447,9 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
-		     "2:\n"						\
+	asm volatile(ASM_STAC "\n"					\
+		     "1:	mov"itype" %"rtype"1,%2\n"		\
+		     "2: " ASM_CLAC "\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "3:	mov %3,%0\n"				\
 		     "	jmp 2b\n"					\
@@ -463,13 +468,13 @@ struct __large_struct { unsigned long buf[100]; };
  * uaccess_try and catch
  */
 #define uaccess_try	do {						\
-	int prev_err = current_thread_info()->uaccess_err;		\
 	current_thread_info()->uaccess_err = 0;				\
+	stac();								\
 	barrier();
 
 #define uaccess_catch(err)						\
+	clac();								\
 	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
-	current_thread_info()->uaccess_err = prev_err;			\
 } while (0)
 
 /**
@@ -569,6 +574,9 @@ strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
+unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
 */
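
Two things change in this header: the open-coded __get_user/__put_user asm paths gain STAC/CLAC around the faulting instruction, and the uaccess_try/uaccess_catch pair (reached via get_user_try/put_user_try in the signal code) now opens with stac() and closes with clac(). That is presumably also why the series moves helpers such as copy_siginfo_to_user() out of those blocks in signal.c further down: it keeps the EFLAGS.AC=1 window short. A rough usage sketch (the function is hypothetical; the macros are the real ones):

    /* Rough sketch: the exception-handled "try" block now maps directly
     * onto a STAC ... CLAC region. */
    static int read_pair(u32 __user *p, u32 *a, u32 *b)
    {
            int err = 0;

            get_user_try {                  /* uaccess_try: uaccess_err = 0; stac() */
                    get_user_ex(*a, p);
                    get_user_ex(*b, p + 1);
            } get_user_catch(err);          /* uaccess_catch: clac(); report -EFAULT */

            return err;
    }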
3  arch/x86/include/asm/uaccess_32.h
@@ -213,7 +213,4 @@ static inline unsigned long __must_check copy_from_user(void *to,
 	return n;
 }
 
-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
-
 #endif /* _ASM_X86_UACCESS_32_H */
3  arch/x86/include/asm/uaccess_64.h
@@ -217,9 +217,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 	}
 }
 
-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
-
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
10  arch/x86/include/asm/xsave.h
@@ -70,8 +70,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
 	if (unlikely(err))
 		return -EFAULT;
 
-	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
-			     "2:\n"
+	__asm__ __volatile__(ASM_STAC "\n"
+			     "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+			     "2: " ASM_CLAC "\n"
 			     ".section .fixup,\"ax\"\n"
 			     "3:  movl $-1,%[err]\n"
 			     "    jmp  2b\n"
@@ -90,8 +91,9 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
 
-	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
-			     "2:\n"
+	__asm__ __volatile__(ASM_STAC "\n"
+			     "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+			     "2: " ASM_CLAC "\n"
 			     ".section .fixup,\"ax\"\n"
 			     "3:  movl $-1,%[err]\n"
 			     "    jmp  2b\n"
15  arch/x86/kernel/acpi/sleep.c
@@ -43,17 +43,22 @@ int acpi_suspend_lowlevel(void)
 
 	header->video_mode = saved_video_mode;
 
+	header->pmode_behavior = 0;
+
 #ifndef CONFIG_64BIT
 	store_gdt((struct desc_ptr *)&header->pmode_gdt);
 
-	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
-		       &header->pmode_efer_high))
-		header->pmode_efer_low = header->pmode_efer_high = 0;
+	if (!rdmsr_safe(MSR_EFER,
+			&header->pmode_efer_low,
+			&header->pmode_efer_high))
+		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
 #endif /* !CONFIG_64BIT */
 
 	header->pmode_cr0 = read_cr0();
-	header->pmode_cr4 = read_cr4_safe();
-	header->pmode_behavior = 0;
+	if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
+		header->pmode_cr4 = read_cr4();
+		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
+	}
 	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
 			&header->pmode_misc_en_low,
 			&header->pmode_misc_en_high))
44  arch/x86/kernel/cpu/common.c
@@ -259,23 +259,36 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 }
 #endif
 
-static int disable_smep __cpuinitdata;
 static __init int setup_disable_smep(char *arg)
 {
-	disable_smep = 1;
+	setup_clear_cpu_cap(X86_FEATURE_SMEP);
 	return 1;
 }
 __setup("nosmep", setup_disable_smep);
 
-static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
+static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_SMEP)) {
-		if (unlikely(disable_smep)) {
-			setup_clear_cpu_cap(X86_FEATURE_SMEP);
-			clear_in_cr4(X86_CR4_SMEP);
-		} else
-			set_in_cr4(X86_CR4_SMEP);
-	}
+	if (cpu_has(c, X86_FEATURE_SMEP))
+		set_in_cr4(X86_CR4_SMEP);
+}
+
+static __init int setup_disable_smap(char *arg)
+{
+	setup_clear_cpu_cap(X86_FEATURE_SMAP);
+	return 1;
+}
+__setup("nosmap", setup_disable_smap);
+
+static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+{
+	unsigned long eflags;
+
+	/* This should have been cleared long ago */
+	raw_local_save_flags(eflags);
+	BUG_ON(eflags & X86_EFLAGS_AC);
+
+	if (cpu_has(c, X86_FEATURE_SMAP))
+		set_in_cr4(X86_CR4_SMAP);
 }
 
 /*
@@ -712,8 +725,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	c->cpu_index = 0;
 	filter_cpuid_features(c, false);
 
-	setup_smep(c);
-
 	if (this_cpu->c_bsp_init)
 		this_cpu->c_bsp_init(c);
 }
@@ -798,8 +809,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		c->phys_proc_id = c->initial_apicid;
 	}
 
-	setup_smep(c);
-
 	get_model_name(c); /* Default name */
 
 	detect_nopl(c);
@@ -864,6 +873,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
 
+	/* Set up SMEP/SMAP */
+	setup_smep(c);
+	setup_smap(c);
+
 	/*
 	 * The vendor-specific functions might have changed features.
 	 * Now we do "generic changes."
@@ -1114,7 +1127,8 @@ void syscall_init(void)
 
 	/* Flags to clear on syscall */
 	wrmsrl(MSR_SYSCALL_MASK,
-	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
+	       X86_EFLAGS_IOPL|X86_EFLAGS_AC);
 }
 
 /*
26  arch/x86/kernel/entry_32.S
@@ -57,6 +57,7 @@
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -407,7 +408,9 @@ sysenter_past_esp:
  */
 	cmpl $__PAGE_OFFSET-3,%ebp
 	jae syscall_fault
+	ASM_STAC
 1:	movl (%ebp),%ebp
+	ASM_CLAC
 	movl %ebp,PT_EBP(%esp)
 	_ASM_EXTABLE(1b,syscall_fault)
 
@@ -488,6 +491,7 @@ ENDPROC(ia32_sysenter_target)
 	# system call handler stub
 ENTRY(system_call)
 	RING0_INT_FRAME			# can't unwind into user space anyway
+	ASM_CLAC
 	pushl_cfi %eax			# save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
@@ -670,6 +674,7 @@ END(syscall_exit_work)
 
 	RING0_INT_FRAME			# can't unwind into user space anyway
 syscall_fault:
+	ASM_CLAC
 	GET_THREAD_INFO(%ebp)
 	movl $-EFAULT,PT_EAX(%esp)
 	jmp resume_userspace
@@ -825,6 +830,7 @@ END(interrupt)
 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	ASM_CLAC
 	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
 	SAVE_ALL
 	TRACE_IRQS_OFF
@@ -841,6 +847,7 @@ ENDPROC(common_interrupt)
 #define BUILD_INTERRUPT3(name, nr, fn)	\
 ENTRY(name)				\
 	RING0_INT_FRAME;		\
+	ASM_CLAC;			\
 	pushl_cfi $~(nr);		\
 	SAVE_ALL;			\
 	TRACE_IRQS_OFF			\
@@ -857,6 +864,7 @@ ENDPROC(name)
 
 ENTRY(coprocessor_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_coprocessor_error
 	jmp error_code
@@ -865,6 +873,7 @@ END(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 #ifdef CONFIG_X86_INVD_BUG
 	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
@@ -886,6 +895,7 @@ END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $-1			# mark this as an int
 	pushl_cfi $do_device_not_available
 	jmp error_code
@@ -906,6 +916,7 @@ END(native_irq_enable_sysexit)
 
 ENTRY(overflow)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_overflow
 	jmp error_code
@@ -914,6 +925,7 @@ END(overflow)
 
 ENTRY(bounds)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_bounds
 	jmp error_code
@@ -922,6 +934,7 @@ END(bounds)
 
 ENTRY(invalid_op)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_invalid_op
 	jmp error_code
@@ -930,6 +943,7 @@ END(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_coprocessor_segment_overrun
 	jmp error_code
@@ -938,6 +952,7 @@ END(coprocessor_segment_overrun)
 
 ENTRY(invalid_TSS)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_invalid_TSS
 	jmp error_code
 	CFI_ENDPROC
@@ -945,6 +960,7 @@ END(invalid_TSS)
 
 ENTRY(segment_not_present)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_segment_not_present
 	jmp error_code
 	CFI_ENDPROC
@@ -952,6 +968,7 @@ END(segment_not_present)
 
 ENTRY(stack_segment)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_stack_segment
 	jmp error_code
 	CFI_ENDPROC
@@ -959,6 +976,7 @@ END(stack_segment)
 
 ENTRY(alignment_check)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_alignment_check
 	jmp error_code
 	CFI_ENDPROC
@@ -966,6 +984,7 @@ END(alignment_check)
 
 ENTRY(divide_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0			# no error code
 	pushl_cfi $do_divide_error
 	jmp error_code
@@ -975,6 +994,7 @@ END(divide_error)
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi machine_check_vector
 	jmp error_code
@@ -984,6 +1004,7 @@ END(machine_check)
 
 ENTRY(spurious_interrupt_bug)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_spurious_interrupt_bug
 	jmp error_code
@@ -1273,6 +1294,7 @@ return_to_handler:
 
 ENTRY(page_fault)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_page_fault
 	ALIGN
 error_code:
@@ -1345,6 +1367,7 @@ END(page_fault)
 
 ENTRY(debug)
 	RING0_INT_FRAME
+	ASM_CLAC
 	cmpl $ia32_sysenter_target,(%esp)
 	jne debug_stack_correct
 	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
@@ -1369,6 +1392,7 @@ END(debug)
 */
ENTRY(nmi)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax
@@ -1439,6 +1463,7 @@ END(nmi)
 
ENTRY(int3)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $-1			# mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF
@@ -1459,6 +1484,7 @@ END(general_protection)
 #ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_async_page_fault
 	jmp error_code
 	CFI_ENDPROC
11  arch/x86/kernel/entry_64.S
@@ -57,6 +57,7 @@
 #include <asm/percpu.h>
 #include <asm/asm.h>
 #include <asm/rcu.h>
+#include <asm/smap.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
@@ -568,7 +569,8 @@ END(ret_from_fork)
  * System call entry. Up to 6 arguments in registers are supported.
  *
  * SYSCALL does not save anything on the stack and does not change the
- * stack pointer.
+ * stack pointer.  However, it does mask the flags register for us, so
+ * CLD and CLAC are not needed.
  */
 
 /*
@@ -987,6 +989,7 @@ END(interrupt)
 	 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	ASM_CLAC
 	XCPT_FRAME
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
@@ -1126,6 +1129,7 @@ END(common_interrupt)
 */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	pushq_cfi $~(\num)
 .Lcommon_\sym:
@@ -1180,6 +1184,7 @@ apicinterrupt IRQ_WORK_VECTOR \
 */
 .macro zeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1197,6 +1202,7 @@ END(\sym)
 
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1215,6 +1221,7 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1234,6 +1241,7 @@ END(\sym)
 
 .macro errorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
@@ -1252,6 +1260,7 @@ END(\sym)
 	/* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
31  arch/x86/kernel/head_32.S
@@ -287,27 +287,28 @@ ENTRY(startup_32_smp)
 	leal -__PAGE_OFFSET(%ecx),%esp
 
 default_entry:
-
 /*
 *	New page tables may be in 4Mbyte page mode and may
 *	be using the global pages.
 *
 *	NOTE! If we are on a 486 we may have no cr4 at all!
- *	So we do not try to touch it unless we really have
- *	some bits in it to set.  This won't work if the BSP
- *	implements cr4 but this AP does not -- very unlikely
- *	but be warned!  The same applies to the pse feature
- *	if not equally supported. --macro
- *
- *	NOTE! We have to correct for the fact that we're
- *	not yet offset PAGE_OFFSET..
+ *	Specifically, cr4 exists if and only if CPUID exists,
+ *	which in turn exists if and only if EFLAGS.ID exists.
 */
-#define cr4_bits pa(mmu_cr4_features)
-	movl cr4_bits,%edx
-	andl %edx,%edx
-	jz 6f
-	movl %cr4,%eax		# Turn on paging options (PSE,PAE,..)
-	orl %edx,%eax
+	movl $X86_EFLAGS_ID,%ecx
+	pushl %ecx
+	popfl
+	pushfl
+	popl %eax
+	pushl $0
+	popfl
+	pushfl
+	popl %edx
+	xorl %edx,%eax
+	testl %ecx,%eax
+	jz 6f			# No ID flag = no CPUID = no CR4
+
+	movl pa(mmu_cr4_features),%eax
 	movl %eax,%cr4
 
 	testb $X86_CR4_PAE, %al		# check if PAE is enabled
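
The rewritten 32-bit startup path replaces the old "only touch CR4 if mmu_cr4_features has bits set" heuristic with a direct probe: CR4 exists iff CPUID exists, and CPUID exists iff the EFLAGS.ID bit can be toggled. The same probe as a freestanding C sketch for readability (hypothetical helper, 32-bit only; unlike the boot code above it restores the caller's EFLAGS rather than leaving them zeroed):

    /* Hypothetical 32-bit helper mirroring the pushfl/popfl sequence above:
     * write EFLAGS with only the ID bit set, read it back, then write 0 and
     * read back again.  If the ID bit differs between the two reads, the
     * CPU honours EFLAGS.ID and therefore implements CPUID (and CR4). */
    static int has_cpuid(void)
    {
            unsigned long flipped, cleared;

            asm volatile("pushfl\n\t"               /* save original EFLAGS    */
                         "pushl %2\n\t"
                         "popfl\n\t"                /* EFLAGS := X86_EFLAGS_ID */
                         "pushfl\n\t"
                         "popl %0\n\t"
                         "pushl $0\n\t"
                         "popfl\n\t"                /* EFLAGS := 0             */
                         "pushfl\n\t"
                         "popl %1\n\t"
                         "popfl"                    /* restore original EFLAGS */
                         : "=&r" (flipped), "=&r" (cleared)
                         : "ri" ((unsigned long)X86_EFLAGS_ID)
                         : "cc");

            return !!((flipped ^ cleared) & X86_EFLAGS_ID);
    }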
24  arch/x86/kernel/signal.c
@@ -114,11 +114,12 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 		regs->orig_ax = -1;		/* disable syscall checks */
 
 		get_user_ex(buf, &sc->fpstate);
-		err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
 
 		get_user_ex(*pax, &sc->ax);
 	} get_user_catch(err);
 
+	err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
+
 	return err;
 }
 
@@ -355,7 +356,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sig, &frame->sig);
 		put_user_ex(&frame->info, &frame->pinfo);
 		put_user_ex(&frame->uc, &frame->puc);
-		err |= copy_siginfo_to_user(&frame->info, info);
 
 		/* Create the ucontext.  */
 		if (cpu_has_xsave)
@@ -367,9 +367,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sas_ss_flags(regs->sp),
 			    &frame->uc.uc_stack.ss_flags);
 		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-		err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-					regs, set->sig[0]);
-		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 		/* Set up to return from userspace.  */
 		restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
@@ -386,6 +383,11 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		 */
 		put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
 	} put_user_catch(err);
+
+	err |= copy_siginfo_to_user(&frame->info, info);
+	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+				regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 	if (err)
 		return -EFAULT;
@@ -434,8 +436,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sas_ss_flags(regs->sp),
 			    &frame->uc.uc_stack.ss_flags);
 		put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
-		err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
-		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 