KVM: SVM: restore host save area from assembly
[ Upstream commit e287bd0 ]

Allow access to the percpu area via the GS segment base, which is
needed in order to access the saved host spec_ctrl value.  In linux-next
FILL_RETURN_BUFFER also needs to access percpu data.

For simplicity, the physical address of the save area is added to struct
svm_cpu_data.

Cc: stable@vger.kernel.org
Fixes: a149180 ("x86: Add magic AMD return-thunk")
Reported-by: Nathan Chancellor <nathan@kernel.org>
Analyzed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
bonzini authored and gregkh committed Nov 26, 2022
1 parent d7be22f commit 54da204
Showing 5 changed files with 26 additions and 13 deletions.
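
For context on the new save_area_pa field: converting the save area's
struct page to a physical address is trivial in C but awkward from
assembly, because __sme_page_pa() is a macro chain, roughly as follows
(a simplified sketch of the macros involved; exact definitions vary by
kernel version):

	/* include/linux/mem_encrypt.h — tag an address with the SME C-bit */
	#define __sme_set(x)      ((x) | sme_me_mask)

	/* arch/x86/kvm/svm/svm.h */
	#define __sme_page_pa(x)  __sme_set(page_to_pfn(x) << PAGE_SHIFT)

Caching the result once per CPU at init time means vmenter.S needs only
a single per-CPU memory load to find the host save area.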
arch/x86/kvm/kvm-asm-offsets.c (1 addition, 0 deletions)

@@ -18,6 +18,7 @@ static void __used common(void)
 		OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
 		OFFSET(SVM_vmcb01, vcpu_svm, vmcb01);
 		OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
+		OFFSET(SD_save_area_pa, svm_cpu_data, save_area_pa);
 	}
 
 	if (IS_ENABLED(CONFIG_KVM_INTEL)) {
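
For background on the new OFFSET() entry: asm-offsets files are compiled
to assembly and post-processed into a generated header, so
SD_save_area_pa becomes a plain #define usable from vmenter.S. A
simplified sketch of the mechanism (adapted from include/linux/kbuild.h):

	/* Emit a marker that the build scripts scan for in the .s output: */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	/* Record the byte offset of a struct member under a given name: */
	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))

The build then turns each "->SYM value" marker into "#define SYM value"
in kvm-asm-offsets.h, which the assembly includes.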
arch/x86/kvm/svm/svm.c (6 additions, 8 deletions)

@@ -594,7 +594,7 @@ static int svm_hardware_enable(void)
 
 	wrmsrl(MSR_EFER, efer | EFER_SVME);
 
-	wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area));
+	wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
 
 	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
 		/*
@@ -650,6 +650,7 @@ static void svm_cpu_uninit(int cpu)
 
 	kfree(sd->sev_vmcbs);
 	__free_page(sd->save_area);
+	sd->save_area_pa = 0;
 	sd->save_area = NULL;
 }
 
@@ -667,6 +668,7 @@ static int svm_cpu_init(int cpu)
 	if (ret)
 		goto free_save_area;
 
+	sd->save_area_pa = __sme_page_pa(sd->save_area);
 	return 0;
 
 free_save_area:
@@ -1452,7 +1454,7 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	 * Save additional host state that will be restored on VMEXIT (sev-es)
 	 * or subsequent vmload of host save area.
	 */
-	vmsave(__sme_page_pa(sd->save_area));
+	vmsave(sd->save_area_pa);
 	if (sev_es_guest(vcpu->kvm)) {
 		struct sev_es_save_area *hostsa;
 		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
@@ -3906,14 +3908,10 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 
 	guest_state_enter_irqoff();
 
-	if (sev_es_guest(vcpu->kvm)) {
+	if (sev_es_guest(vcpu->kvm))
 		__svm_sev_es_vcpu_run(svm);
-	} else {
-		struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
-
+	else
 		__svm_vcpu_run(svm);
-		vmload(__sme_page_pa(sd->save_area));
-	}
 
 	guest_state_exit_irqoff();
 }
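
Why the vmload() call had to leave C code entirely: on x86-64, per-CPU
variables are addressed relative to the GS segment base, which is only
restored by the VMLOAD of the host save area. A rough sketch of what a
per-CPU read compiles down to (illustrative only, assuming svm_data is
a per-CPU struct as in this series):

	static unsigned long save_area_pa_sketch(void)
	{
		/* Lowers to a single GS-relative load, approximately:
		 *   mov %gs:(svm_data + SD_save_area_pa), %rax
		 * Such an access is unsafe between #VMEXIT and the vmload
		 * now done inside __svm_vcpu_run — exactly the window
		 * where the spec_ctrl/RET-thunk code must run.
		 */
		return this_cpu_read(svm_data.save_area_pa);
	}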
arch/x86/kvm/svm/svm.h (2 additions, 0 deletions)

@@ -288,6 +288,8 @@ struct svm_cpu_data {
 	struct kvm_ldttss_desc *tss_desc;
 
 	struct page *save_area;
+	unsigned long save_area_pa;
+
 	struct vmcb *current_vmcb;
 
 	/* index = sev_asid, value = vmcb pointer */
arch/x86/kvm/svm/svm_ops.h (0 additions, 5 deletions)

@@ -61,9 +61,4 @@ static __always_inline void vmsave(unsigned long pa)
 	svm_asm1(vmsave, "a" (pa), "memory");
 }
 
-static __always_inline void vmload(unsigned long pa)
-{
-	svm_asm1(vmload, "a" (pa), "memory");
-}
-
 #endif /* __KVM_X86_SVM_OPS_H */
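
The vmload() helper can be dropped because its last C caller is gone;
the operation now lives in vmenter.S. For reference, a minimal sketch of
what the helper expanded to (simplified — the kernel's svm_asm1() macro
also wires up exception-table handling):

	static __always_inline void vmload_sketch(unsigned long pa)
	{
		/* VMLOAD takes the save area's physical address in RAX. */
		asm volatile("vmload %0" : : "a" (pa) : "memory");
	}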
arch/x86/kvm/svm/vmenter.S (17 additions, 0 deletions)

@@ -49,6 +49,14 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
 	push %_ASM_BX
 
+	/*
+	 * Save variables needed after vmexit on the stack, in inverse
+	 * order compared to when they are needed.
+	 */
+
+	/* Needed to restore access to percpu variables. */
+	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
+
 	/* Save @svm. */
 	push %_ASM_ARG1
 
@@ -124,6 +132,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 5:	vmsave %_ASM_AX
 6:
 
+	/* Restores GSBASE among other things, allowing access to percpu data. */
+	pop %_ASM_AX
+7:	vmload %_ASM_AX
+8:
+
 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
@@ -187,10 +200,14 @@ SYM_FUNC_START(__svm_vcpu_run)
 50:	cmpb $0, kvm_rebooting
 	jne 6b
 	ud2
+70:	cmpb $0, kvm_rebooting
+	jne 8b
+	ud2
 
 	_ASM_EXTABLE(1b, 10b)
 	_ASM_EXTABLE(3b, 30b)
 	_ASM_EXTABLE(5b, 50b)
+	_ASM_EXTABLE(7b, 70b)
 
 SYM_FUNC_END(__svm_vcpu_run)
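
A note on the new 7:/8:/70: labels: they follow the kernel's
exception-table convention. _ASM_EXTABLE(7b, 70b) tells the fault
handler that if the VMLOAD at 7: faults, execution resumes at 70:,
where the fault is tolerated only while KVM is rebooting. An
illustrative C analogue of that fixup logic (an assumption for
exposition — the real mechanism is the exception table, not a branch):

	static void vmload_fixup_sketch(void)
	{
		/* Fixup target 70:: a fault in the vmload is only
		 * acceptable while kvm_rebooting is set. */
		if (!kvm_rebooting)
			__builtin_trap();	/* plays the role of ud2 */
		/* otherwise fall through, i.e. resume at label 8: */
	}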
