Skip to content

Commit 22e420e

Browse files
committed
x86/virt: KVM: Move VMXOFF helpers into KVM VMX
Now that VMX is disabled in emergencies via the virt callbacks, move the VMXOFF helpers into KVM, the only remaining user. No functional change intended. Reviewed-by: Kai Huang <kai.huang@intel.com> Link: https://lore.kernel.org/r/20230721201859.2307736-11-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent b6a6af0 commit 22e420e

File tree

2 files changed

+26
-45
lines changed

2 files changed

+26
-45
lines changed

arch/x86/include/asm/virtext.h

Lines changed: 0 additions & 42 deletions
Original file line number | Diff line number | Diff line change
@@ -19,48 +19,6 @@
1919
#include <asm/svm.h>
2020
#include <asm/tlbflush.h>
2121

22-
/*
23-
* VMX functions:
24-
*/
25-
/**
26-
* cpu_vmxoff() - Disable VMX on the current CPU
27-
*
28-
* Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
29-
*
30-
* Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
31-
* atomically track post-VMXON state, e.g. this may be called in NMI context.
32-
* Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
33-
* faults are guaranteed to be due to the !post-VMXON check unless the CPU is
34-
* magically in RM, VM86, compat mode, or at CPL>0.
35-
*/
36-
static inline int cpu_vmxoff(void)
37-
{
38-
asm_volatile_goto("1: vmxoff\n\t"
39-
_ASM_EXTABLE(1b, %l[fault])
40-
::: "cc", "memory" : fault);
41-
42-
cr4_clear_bits(X86_CR4_VMXE);
43-
return 0;
44-
45-
fault:
46-
cr4_clear_bits(X86_CR4_VMXE);
47-
return -EIO;
48-
}
49-
50-
static inline int cpu_vmx_enabled(void)
51-
{
52-
return __read_cr4() & X86_CR4_VMXE;
53-
}
54-
55-
/** Disable VMX if it is enabled on the current CPU
56-
*/
57-
static inline void __cpu_emergency_vmxoff(void)
58-
{
59-
if (cpu_vmx_enabled())
60-
cpu_vmxoff();
61-
}
62-
63-
6422
/*
6523
* SVM functions:
6624
*/

arch/x86/kvm/vmx/vmx.c

Lines changed: 26 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -47,7 +47,6 @@
4747
#include <asm/mshyperv.h>
4848
#include <asm/mwait.h>
4949
#include <asm/spec-ctrl.h>
50-
#include <asm/virtext.h>
5150
#include <asm/vmx.h>
5251

5352
#include "capabilities.h"
@@ -725,6 +724,29 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
725724
return ret;
726725
}
727726

727+
/*
728+
* Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
729+
*
730+
* Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
731+
* atomically track post-VMXON state, e.g. this may be called in NMI context.
732+
* Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
733+
* faults are guaranteed to be due to the !post-VMXON check unless the CPU is
734+
* magically in RM, VM86, compat mode, or at CPL>0.
735+
*/
736+
static int kvm_cpu_vmxoff(void)
737+
{
738+
asm_volatile_goto("1: vmxoff\n\t"
739+
_ASM_EXTABLE(1b, %l[fault])
740+
::: "cc", "memory" : fault);
741+
742+
cr4_clear_bits(X86_CR4_VMXE);
743+
return 0;
744+
745+
fault:
746+
cr4_clear_bits(X86_CR4_VMXE);
747+
return -EIO;
748+
}
749+
728750
static void vmx_emergency_disable(void)
729751
{
730752
int cpu = raw_smp_processor_id();
@@ -734,7 +756,8 @@ static void vmx_emergency_disable(void)
734756
loaded_vmcss_on_cpu_link)
735757
vmcs_clear(v->vmcs);
736758

737-
__cpu_emergency_vmxoff();
759+
if (__read_cr4() & X86_CR4_VMXE)
760+
kvm_cpu_vmxoff();
738761
}
739762

740763
static void __loaded_vmcs_clear(void *arg)
@@ -2799,7 +2822,7 @@ static void vmx_hardware_disable(void)
27992822
{
28002823
vmclear_local_loaded_vmcss();
28012824

2802-
if (cpu_vmxoff())
2825+
if (kvm_cpu_vmxoff())
28032826
kvm_spurious_fault();
28042827

28052828
hv_reset_evmcs();

0 commit comments

Comments (0)