KVM: x86: Add a struct to consolidate host values, e.g. EFER, XCR0, etc...

Add "struct kvm_host_values kvm_host" to hold the various host values
that KVM snapshots during initialization.  Bundling the host values into
a single struct simplifies adding new MSRs and other features with host
state/values that KVM cares about, and provides a one-stop shop.  E.g.
adding a new value requires one line, whereas tracking each value
individually often requires three: declaration, definition, and export.

No functional change intended.

Link: https://lore.kernel.org/r/20240423221521.2923759-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
sean-jc committed Jun 3, 2024
1 parent c3f38fa commit 7974c06
Showing 6 changed files with 35 additions and 40 deletions.
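The pattern the commit message describes is simple: instead of one global, one EXPORT_SYMBOL_GPL(), and one extern declaration per host value, every snapshot lives in a single exported struct that is filled in once at init. Below is a minimal standalone C sketch of that pattern (not kernel code): the field names mirror the patch, while read_host_value() and snapshot_host_values() are hypothetical stand-ins for the rdmsrl()/xgetbv() reads done in kvm_x86_vendor_init().

#include <stdint.h>

/* One struct holds every host value snapshotted at init (mirrors kvm_host_values). */
struct kvm_host_values {
	uint64_t efer;
	uint64_t xcr0;
	uint64_t xss;
	uint64_t arch_capabilities;
	/* Adding another host value is one field here plus one read below. */
};

static struct kvm_host_values kvm_host;

/* Hypothetical stand-in for rdmsrl()/xgetbv(); KVM reads real hardware state here. */
static uint64_t read_host_value(unsigned int idx)
{
	return 0;
}

/* Snapshot everything once, the way kvm_x86_vendor_init() does for the real struct. */
static void snapshot_host_values(void)
{
	kvm_host.efer              = read_host_value(0);
	kvm_host.xcr0              = read_host_value(1);
	kvm_host.xss               = read_host_value(2);
	kvm_host.arch_capabilities = read_host_value(3);
}

int main(void)
{
	snapshot_host_values();
	return 0;
}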
1 change: 0 additions & 1 deletion arch/x86/include/asm/kvm_host.h
@@ -1853,7 +1853,6 @@ struct kvm_arch_async_pf {
 };
 
 extern u32 __read_mostly kvm_nr_uret_msrs;
-extern u64 __read_mostly host_efer;
 extern bool __read_mostly allow_smaller_maxphyaddr;
 extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;
2 changes: 1 addition & 1 deletion arch/x86/kvm/svm/sev.c
@@ -3324,7 +3324,7 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are
	 */
	hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	hostsa->pkru = read_pkru();
-	hostsa->xss = host_xss;
+	hostsa->xss = kvm_host.xss;
 
	/*
	 * If DebugSwap is enabled, debug registers are loaded but NOT saved by
8 changes: 4 additions & 4 deletions arch/x86/kvm/vmx/nested.c
@@ -2422,7 +2422,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
	if (cpu_has_load_ia32_efer()) {
		if (guest_efer & EFER_LMA)
			exec_control |= VM_ENTRY_IA32E_MODE;
-		if (guest_efer != host_efer)
+		if (guest_efer != kvm_host.efer)
			exec_control |= VM_ENTRY_LOAD_IA32_EFER;
	}
	vm_entry_controls_set(vmx, exec_control);
@@ -2435,7 +2435,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
	 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
	 */
	exec_control = __vm_exit_controls_get(vmcs01);
-	if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
+	if (cpu_has_load_ia32_efer() && guest_efer != kvm_host.efer)
		exec_control |= VM_EXIT_LOAD_IA32_EFER;
	else
		exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
@@ -4662,7 +4662,7 @@ static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
		return vmcs_read64(GUEST_IA32_EFER);
 
	if (cpu_has_load_ia32_efer())
-		return host_efer;
+		return kvm_host.efer;
 
	for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
		if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
@@ -4673,7 +4673,7 @@ static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
	if (efer_msr)
		return efer_msr->data;
 
-	return host_efer;
+	return kvm_host.efer;
 }
 
 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
14 changes: 7 additions & 7 deletions arch/x86/kvm/vmx/vmx.c
@@ -259,7 +259,7 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
		return 0;
	}
 
-	if (host_arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+	if (kvm_host.arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
		return 0;
	}
@@ -404,7 +404,7 @@ static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
	 * and VM-Exit.
	 */
	vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
-				(host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
+				(kvm_host.arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
				!boot_cpu_has_bug(X86_BUG_MDS) &&
				!boot_cpu_has_bug(X86_BUG_TAA);
 
@@ -1123,12 +1123,12 @@ static bool update_transition_efer(struct vcpu_vmx *vmx)
	 * atomically, since it's faster than switching it manually.
	 */
	if (cpu_has_load_ia32_efer() ||
-	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
+	    (enable_ept && ((vmx->vcpu.arch.efer ^ kvm_host.efer) & EFER_NX))) {
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
-		if (guest_efer != host_efer)
+		if (guest_efer != kvm_host.efer)
			add_atomic_switch_msr(vmx, MSR_EFER,
-					      guest_efer, host_efer, false);
+					      guest_efer, kvm_host.efer, false);
		else
			clear_atomic_switch_msr(vmx, MSR_EFER);
		return false;
@@ -1141,7 +1141,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx)
		clear_atomic_switch_msr(vmx, MSR_EFER);
 
	guest_efer &= ~ignore_bits;
-	guest_efer |= host_efer & ignore_bits;
+	guest_efer |= kvm_host.efer & ignore_bits;
 
	vmx->guest_uret_msrs[i].data = guest_efer;
	vmx->guest_uret_msrs[i].mask = ~ignore_bits;
@@ -4357,7 +4357,7 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
	}
 
	if (cpu_has_load_ia32_efer())
-		vmcs_write64(HOST_IA32_EFER, host_efer);
+		vmcs_write64(HOST_IA32_EFER, kvm_host.efer);
 }
 
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
38 changes: 15 additions & 23 deletions arch/x86/kvm/x86.c
@@ -100,6 +100,9 @@
 struct kvm_caps kvm_caps __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_caps);
 
+struct kvm_host_values kvm_host __read_mostly;
+EXPORT_SYMBOL_GPL(kvm_host);
+
 #define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
 
 #define emul_to_vcpu(ctxt) \
@@ -229,21 +232,12 @@ static struct kvm_user_return_msrs __percpu *user_return_msrs;
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)
 
-u64 __read_mostly host_efer;
-EXPORT_SYMBOL_GPL(host_efer);
-
 bool __read_mostly allow_smaller_maxphyaddr = 0;
 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
 
 bool __read_mostly enable_apicv = true;
 EXPORT_SYMBOL_GPL(enable_apicv);
 
-u64 __read_mostly host_xss;
-EXPORT_SYMBOL_GPL(host_xss);
-
-u64 __read_mostly host_arch_capabilities;
-EXPORT_SYMBOL_GPL(host_arch_capabilities);
-
 const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
@@ -317,8 +311,6 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
	sizeof(kvm_vcpu_stats_desc),
 };
 
-u64 __read_mostly host_xcr0;
-
 static struct kmem_cache *x86_emulator_cache;
 
 /*
@@ -1025,11 +1017,11 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 
	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
-		if (vcpu->arch.xcr0 != host_xcr0)
+		if (vcpu->arch.xcr0 != kvm_host.xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
 
		if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
-		    vcpu->arch.ia32_xss != host_xss)
+		    vcpu->arch.ia32_xss != kvm_host.xss)
			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
	}
 
@@ -1056,12 +1048,12 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 
	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
-		if (vcpu->arch.xcr0 != host_xcr0)
-			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+		if (vcpu->arch.xcr0 != kvm_host.xcr0)
+			xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
 
		if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
-		    vcpu->arch.ia32_xss != host_xss)
-			wrmsrl(MSR_IA32_XSS, host_xss);
+		    vcpu->arch.ia32_xss != kvm_host.xss)
+			wrmsrl(MSR_IA32_XSS, kvm_host.xss);
	}
 
 }
@@ -1628,7 +1620,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
 
 static u64 kvm_get_arch_capabilities(void)
 {
-	u64 data = host_arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
+	u64 data = kvm_host.arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
 
	/*
	 * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
@@ -9781,19 +9773,19 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
	kvm_caps.supported_mce_cap = MCG_CTL_P | MCG_SER_P;
 
	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
-		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-		kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
+		kvm_host.xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+		kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
	}
 
-	rdmsrl_safe(MSR_EFER, &host_efer);
+	rdmsrl_safe(MSR_EFER, &kvm_host.efer);
 
	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		rdmsrl(MSR_IA32_XSS, host_xss);
+		rdmsrl(MSR_IA32_XSS, kvm_host.xss);
 
	kvm_init_pmu_capability(ops->pmu_ops);
 
	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, host_arch_capabilities);
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, kvm_host.arch_capabilities);
 
	r = ops->hardware_setup();
	if (r != 0)
12 changes: 8 additions & 4 deletions arch/x86/kvm/x86.h
@@ -33,6 +33,13 @@ struct kvm_caps {
	u64 supported_perf_cap;
 };
 
+struct kvm_host_values {
+	u64 efer;
+	u64 xcr0;
+	u64 xss;
+	u64 arch_capabilities;
+};
+
 void kvm_spurious_fault(void);
 
 #define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \
@@ -325,11 +332,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
 
-extern u64 host_xcr0;
-extern u64 host_xss;
-extern u64 host_arch_capabilities;
-
 extern struct kvm_caps kvm_caps;
+extern struct kvm_host_values kvm_host;
 
 extern bool enable_pmu;
 
