Skip to content

Commit d00b99c

Browse files
babumoger authored and bonzini committed
KVM: SVM: Add support for Virtual SPEC_CTRL
Newer AMD processors have a feature to virtualize the use of the SPEC_CTRL MSR. Presence of this feature is indicated via CPUID function 0x8000000A_EDX[20]: GuestSpecCtrl. Hypervisors are not required to enable this feature since it is automatically enabled on processors that support it. A hypervisor may wish to impose speculation controls on guest execution or a guest may want to impose its own speculation controls. Therefore, the processor implements both host and guest versions of SPEC_CTRL. When in host mode, the host SPEC_CTRL value is in effect and writes update only the host version of SPEC_CTRL. On a VMRUN, the processor loads the guest version of SPEC_CTRL from the VMCB. When the guest writes SPEC_CTRL, only the guest version is updated. On a VMEXIT, the guest version is saved into the VMCB and the processor returns to only using the host SPEC_CTRL for speculation control. The guest SPEC_CTRL is located at offset 0x2E0 in the VMCB. The effective SPEC_CTRL setting is the guest SPEC_CTRL setting or'ed with the hypervisor SPEC_CTRL setting. This allows the hypervisor to ensure a minimum SPEC_CTRL if desired. This support also fixes an issue where a guest may sometimes see an inconsistent value for the SPEC_CTRL MSR on processors that support this feature. With the current SPEC_CTRL support, the first write to SPEC_CTRL is intercepted and the virtualized version of the SPEC_CTRL MSR is not updated. When the guest reads back the SPEC_CTRL MSR, it will be 0x0, instead of the actual expected value. There isn’t a security concern here, because the host SPEC_CTRL value is or’ed with the Guest SPEC_CTRL value to generate the effective SPEC_CTRL value. KVM writes with the guest's virtualized SPEC_CTRL value to SPEC_CTRL MSR just before the VMRUN, so it will always have the actual value even though it doesn’t appear that way in the guest. The guest will only see the proper value for the SPEC_CTRL register if the guest was to write to the SPEC_CTRL register again. 
With Virtual SPEC_CTRL support, the save area spec_ctrl is properly saved and restored. So, the guest will always see the proper value when it is read back.

Signed-off-by: Babu Moger <babu.moger@amd.com>
Message-Id: <161188100955.28787.11816849358413330720.stgit@bmoger-ubuntu>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent f333374 commit d00b99c

File tree

3 files changed

+39
-6
lines changed

3 files changed

+39
-6
lines changed

arch/x86/include/asm/svm.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,9 @@ struct vmcb_save_area {
269269
* SEV-ES guests when referenced through the GHCB or for
270270
* saving to the host save area.
271271
*/
272-
u8 reserved_7[80];
272+
u8 reserved_7[72];
273+
u32 spec_ctrl; /* Guest version of SPEC_CTRL at 0x2E0 */
274+
u8 reserved_7b[4];
273275
u32 pkru;
274276
u8 reserved_7a[20];
275277
u64 reserved_8; /* rax already available at 0x01f8 */

arch/x86/kvm/svm/nested.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -512,6 +512,18 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
512512
recalc_intercepts(svm);
513513
}
514514

515+
static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
516+
{
517+
/*
518+
* Some VMCB state is shared between L1 and L2 and thus has to be
519+
* moved at the time of nested vmrun and vmexit.
520+
*
521+
* VMLOAD/VMSAVE state would also belong in this category, but KVM
522+
* always performs VMLOAD and VMSAVE from the VMCB01.
523+
*/
524+
to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
525+
}
526+
515527
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
516528
struct vmcb *vmcb12)
517529
{
@@ -536,6 +548,7 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
536548

537549
WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
538550

551+
nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
539552
nested_load_control_from_vmcb12(svm, &vmcb12->control);
540553

541554
svm_switch_vmcb(svm, &svm->nested.vmcb02);
@@ -725,6 +738,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
725738
vmcb12->control.pause_filter_thresh =
726739
svm->vmcb->control.pause_filter_thresh;
727740

741+
nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
742+
728743
svm_switch_vmcb(svm, &svm->vmcb01);
729744

730745
/*

arch/x86/kvm/svm/svm.c

Lines changed: 21 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1247,6 +1247,13 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
12471247

12481248
svm_check_invpcid(svm);
12491249

1250+
/*
1251+
* If the host supports V_SPEC_CTRL then disable the interception
1252+
* of MSR_IA32_SPEC_CTRL.
1253+
*/
1254+
if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
1255+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
1256+
12501257
if (kvm_vcpu_apicv_active(vcpu))
12511258
avic_init_vmcb(svm);
12521259

@@ -2710,7 +2717,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
27102717
!guest_has_spec_ctrl_msr(vcpu))
27112718
return 1;
27122719

2713-
msr_info->data = svm->spec_ctrl;
2720+
if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2721+
msr_info->data = svm->vmcb->save.spec_ctrl;
2722+
else
2723+
msr_info->data = svm->spec_ctrl;
27142724
break;
27152725
case MSR_AMD64_VIRT_SPEC_CTRL:
27162726
if (!msr_info->host_initiated &&
@@ -2808,7 +2818,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
28082818
if (kvm_spec_ctrl_test_value(data))
28092819
return 1;
28102820

2811-
svm->spec_ctrl = data;
2821+
if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2822+
svm->vmcb->save.spec_ctrl = data;
2823+
else
2824+
svm->spec_ctrl = data;
28122825
if (!data)
28132826
break;
28142827

@@ -3802,7 +3815,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
38023815
* is no need to worry about the conditional branch over the wrmsr
38033816
* being speculatively taken.
38043817
*/
3805-
x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
3818+
if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
3819+
x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
38063820

38073821
svm_vcpu_enter_exit(vcpu);
38083822

@@ -3821,13 +3835,15 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
38213835
* If the L02 MSR bitmap does not intercept the MSR, then we need to
38223836
* save it.
38233837
*/
3824-
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
3838+
if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL) &&
3839+
unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
38253840
svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
38263841

38273842
if (!sev_es_guest(vcpu->kvm))
38283843
reload_tss(vcpu);
38293844

3830-
x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
3845+
if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
3846+
x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
38313847

38323848
if (!sev_es_guest(vcpu->kvm)) {
38333849
vcpu->arch.cr2 = svm->vmcb->save.cr2;

0 commit comments

Comments
 (0)