Skip to content

Commit

Permalink
x86/virt: KVM: Move VMXOFF helpers into KVM VMX
Browse files Browse the repository at this point in the history
Now that VMX is disabled in emergencies via the virt callbacks, move the
VMXOFF helpers into KVM, the only remaining user.

No functional change intended.

Reviewed-by: Kai Huang <kai.huang@intel.com>
Link: https://lore.kernel.org/r/20230721201859.2307736-11-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
  • Loading branch information
sean-jc committed Aug 3, 2023
1 parent b6a6af0 commit 22e420e
Show file tree
Hide file tree
Showing 2 changed files with 26 additions and 45 deletions.
42 changes: 0 additions & 42 deletions arch/x86/include/asm/virtext.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,48 +19,6 @@
#include <asm/svm.h>
#include <asm/tlbflush.h>

/*
* VMX functions:
*/
/**
* cpu_vmxoff() - Disable VMX on the current CPU
*
* Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
*
* Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
* atomically track post-VMXON state, e.g. this may be called in NMI context.
* Eat all faults, as all other faults on VMXOFF are mode related, i.e.
* faults are guaranteed to be due to the !post-VMXON check unless the CPU is
* magically in RM, VM86, compat mode, or at CPL>0.
*/
static inline int cpu_vmxoff(void)
{
	/*
	 * Execute VMXOFF with an exception fixup: if VMXOFF raises #UD
	 * (CPU is !post-VMXON), control transfers to the "fault" label
	 * instead of oopsing.
	 */
	asm_volatile_goto("1: vmxoff\n\t"
			  _ASM_EXTABLE(1b, %l[fault])
			  ::: "cc", "memory" : fault);

	/* Success: VMX is off; clear CR4.VMXE to record that state. */
	cr4_clear_bits(X86_CR4_VMXE);
	return 0;

fault:
	/* VMXOFF faulted (wasn't post-VMXON); still clear CR4.VMXE. */
	cr4_clear_bits(X86_CR4_VMXE);
	return -EIO;
}

/* Report whether CR4.VMXE is set on the current CPU (non-zero if so). */
static inline int cpu_vmx_enabled(void)
{
	unsigned long cr4 = __read_cr4();

	return cr4 & X86_CR4_VMXE;
}

/*
 * Disable VMX on the current CPU, but only if CR4.VMXE indicates it
 * may be enabled; the cpu_vmxoff() return value is ignored.
 */
static inline void __cpu_emergency_vmxoff(void)
{
	if (!cpu_vmx_enabled())
		return;

	cpu_vmxoff();
}


/*
* SVM functions:
*/
Expand Down
29 changes: 26 additions & 3 deletions arch/x86/kvm/vmx/vmx.c
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,6 @@
#include <asm/mshyperv.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/virtext.h>
#include <asm/vmx.h>

#include "capabilities.h"
Expand Down Expand Up @@ -725,6 +724,29 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
return ret;
}

/*
* Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
*
* Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
* atomically track post-VMXON state, e.g. this may be called in NMI context.
* Eat all faults, as all other faults on VMXOFF are mode related, i.e.
* faults are guaranteed to be due to the !post-VMXON check unless the CPU is
* magically in RM, VM86, compat mode, or at CPL>0.
*/
static int kvm_cpu_vmxoff(void)
{
	/*
	 * Execute VMXOFF with an exception fixup: if VMXOFF raises #UD
	 * (CPU is !post-VMXON), control transfers to the "fault" label
	 * instead of oopsing.
	 */
	asm_volatile_goto("1: vmxoff\n\t"
			  _ASM_EXTABLE(1b, %l[fault])
			  ::: "cc", "memory" : fault);

	/* Success: VMX is off; clear CR4.VMXE to record that state. */
	cr4_clear_bits(X86_CR4_VMXE);
	return 0;

fault:
	/* VMXOFF faulted (wasn't post-VMXON); still clear CR4.VMXE. */
	cr4_clear_bits(X86_CR4_VMXE);
	return -EIO;
}

static void vmx_emergency_disable(void)
{
int cpu = raw_smp_processor_id();
Expand All @@ -734,7 +756,8 @@ static void vmx_emergency_disable(void)
loaded_vmcss_on_cpu_link)
vmcs_clear(v->vmcs);

__cpu_emergency_vmxoff();
if (__read_cr4() & X86_CR4_VMXE)
kvm_cpu_vmxoff();
}

static void __loaded_vmcs_clear(void *arg)
Expand Down Expand Up @@ -2799,7 +2822,7 @@ static void vmx_hardware_disable(void)
{
vmclear_local_loaded_vmcss();

if (cpu_vmxoff())
if (kvm_cpu_vmxoff())
kvm_spurious_fault();

hv_reset_evmcs();
Expand Down

0 comments on commit 22e420e

Please sign in to comment.