
Commit c331b40

ubizjak authored and sean-jc committed
KVM: SVM: Ensure SPEC_CTRL[63:32] is context switched between guest and host
SPEC_CTRL is an MSR, i.e. a 64-bit value, but the VMRUN assembly code
assumes bits 63:32 are always zero. The bug is _currently_ benign because
neither KVM nor the kernel supports setting any of bits 63:32, but it's
still a bug that needs to be fixed.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Link: https://patch.msgid.link/20251106191230.182393-1-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
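Background on the fix: WRMSR writes the 64-bit value held in EDX:EAX
(high:low) to the MSR indexed by ECX, so a correct context switch of
SPEC_CTRL has to populate both halves. A minimal C sketch of the split,
using a hypothetical wrmsr_split() helper rather than any kernel API:

#include <stdint.h>

/*
 * Illustrative only: split a 64-bit MSR value into the EDX:EAX pair
 * that WRMSR consumes.  Hardcoding the high half to zero, as the old
 * "xor %edx, %edx" did, silently drops bits 63:32.
 */
static inline void wrmsr_split(uint64_t val, uint32_t *eax, uint32_t *edx)
{
	*eax = (uint32_t)val;		/* bits 31:0  -> EAX */
	*edx = (uint32_t)(val >> 32);	/* bits 63:32 -> EDX */
}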
1 parent 3d80f4c commit c331b40

File tree: 1 file changed (+37, -10 lines)


arch/x86/kvm/svm/vmenter.S

Lines changed: 37 additions & 10 deletions

@@ -52,11 +52,23 @@
 	 * there must not be any returns or indirect branches between this code
 	 * and vmentry.
 	 */
-	movl SVM_spec_ctrl(%_ASM_DI), %eax
-	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
+#ifdef CONFIG_X86_64
+	mov SVM_spec_ctrl(%rdi), %rdx
+	cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	je 801b
+	movl %edx, %eax
+	shr $32, %rdx
+#else
+	mov SVM_spec_ctrl(%edi), %eax
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
+	xor %eax, %ecx
+	mov SVM_spec_ctrl + 4(%edi), %edx
+	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi
+	xor %edx, %esi
+	or %esi, %ecx
 	je 801b
+#endif
 	mov $MSR_IA32_SPEC_CTRL, %ecx
-	xor %edx, %edx
 	wrmsr
 	jmp 801b
 .endm
@@ -81,13 +93,25 @@
 	jnz 998f
 	rdmsr
 	movl %eax, SVM_spec_ctrl(%_ASM_DI)
+	movl %edx, SVM_spec_ctrl + 4(%_ASM_DI)
 998:
-
 	/* Now restore the host value of the MSR if different from the guest's. */
-	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
-	cmp SVM_spec_ctrl(%_ASM_DI), %eax
+#ifdef CONFIG_X86_64
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	cmp SVM_spec_ctrl(%rdi), %rdx
 	je 901b
-	xor %edx, %edx
+	movl %edx, %eax
+	shr $32, %rdx
+#else
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %eax
+	mov SVM_spec_ctrl(%edi), %esi
+	xor %eax, %esi
+	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx
+	mov SVM_spec_ctrl + 4(%edi), %edi
+	xor %edx, %edi
+	or %edi, %esi
+	je 901b
+#endif
 	wrmsr
 	jmp 901b
 .endm
@@ -134,7 +158,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	mov %_ASM_ARG1, %_ASM_DI
 .endif

-	/* Clobbers RAX, RCX, RDX. */
+	/* Clobbers RAX, RCX, RDX (and ESI on 32-bit), consumes RDI (@svm). */
 	RESTORE_GUEST_SPEC_CTRL

 	/*
@@ -211,7 +235,10 @@ SYM_FUNC_START(__svm_vcpu_run)
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

-	/* Clobbers RAX, RCX, RDX. */
+	/*
+	 * Clobbers RAX, RCX, RDX (and ESI, EDI on 32-bit), consumes RDI (@svm)
+	 * and RSP (pointer to @spec_ctrl_intercepted).
+	 */
 	RESTORE_HOST_SPEC_CTRL

 	/*
@@ -331,7 +358,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	mov %rdi, SEV_ES_RDI (%rdx)
 	mov %rsi, SEV_ES_RSI (%rdx)

-	/* Clobbers RAX, RCX, RDX (@hostsa). */
+	/* Clobbers RAX, RCX, and RDX (@hostsa), consumes RDI (@svm). */
 	RESTORE_GUEST_SPEC_CTRL

 	/* Get svm->current_vmcb->pa into RAX. */
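
A note on the new #else path: 32-bit kernels have no 64-bit GPRs, so the
patch tests the two 64-bit SPEC_CTRL values for equality by XORing each
32-bit half with its counterpart and ORing the results; the OR is zero,
and the je is taken, iff both halves match. The 64-bit path instead
compares the full value in RDX, then builds the WRMSR pair with
"movl %edx, %eax" (low half) and "shr $32, %rdx" (high half). A minimal
C sketch of the 32-bit equality test, with illustrative names only:

#include <stdint.h>

/* Hypothetical helper mirroring the #else path's equality test. */
static int u64_equal_32bit(uint32_t a_lo, uint32_t a_hi,
			   uint32_t b_lo, uint32_t b_hi)
{
	/* Each XOR is zero iff the corresponding halves match. */
	return ((a_lo ^ b_lo) | (a_hi ^ b_hi)) == 0;
}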
