KVM: x86/mmu: Add struct and helpers to retrieve MMU role bits from regs
Introduce "struct kvm_mmu_role_regs" to hold the register state that is
incorporated into the mmu_role.  For nested TDP, the register state that
is factored into the MMU isn't vCPU state; the dedicated struct will be
used to propagate the correct state throughout the flows without having
to pass multiple params, and also provides helpers for the various flag
accessors.
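
For illustration, each invocation of the accessor-generating macro in the diff
below expands to a tiny inline predicate over the captured register values,
e.g. BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG) becomes roughly:

    static inline bool ____is_cr0_pg(struct kvm_mmu_role_regs *regs)
    {
            return !!(regs->cr0 & X86_CR0_PG);
    }

so callers test ____is_cr0_pg(&regs) instead of open-coding the flag check.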

Intentionally make the new helpers cumbersome/ugly by prepending four
underscores.  In the not-too-distant future, it will be preferable to use
the mmu_role to query bits as the mmu_role can drop irrelevant bits
without creating contradictions, e.g. clearing CR4 bits when CR0.PG=0.
Reserve the clean helper names (no underscores) for the mmu_role.
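
For example, once the mmu_role is the single source of truth, a clean accessor
could look roughly like the sketch below.  This is illustrative only and not
part of this patch; it relies on nothing beyond the cr0_wp bit that already
lives in the base page role:

    /* Illustrative sketch, not part of this patch. */
    static inline bool is_cr0_wp(struct kvm_mmu *mmu)
    {
            return !!mmu->mmu_role.base.cr0_wp;
    }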

Add a helper for vCPU conversion, which is the common case.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
sean-jc authored and intel-lab-lkp committed Jun 22, 2021
1 parent a3bad97 commit 01d7a01
1 changed file: arch/x86/kvm/mmu/mmu.c (53 additions, 13 deletions)
@@ -176,9 +176,46 @@ static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
 
+struct kvm_mmu_role_regs {
+        const unsigned long cr0;
+        const unsigned long cr4;
+        const u64 efer;
+};
+
 #define CREATE_TRACE_POINTS
 #include "mmutrace.h"
 
+/*
+ * Yes, lot's of underscores. They're a hint that you probably shouldn't be
+ * reading from the role_regs. Once the mmu_role is constructed, it becomes
+ * the single source of truth for the MMU's state.
+ */
+#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)                     \
+static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs) \
+{                                                                         \
+        return !!(regs->reg & flag);                                      \
+}
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
+
+static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
+{
+        struct kvm_mmu_role_regs regs = {
+                .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
+                .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
+                .efer = vcpu->arch.efer,
+        };
+
+        return regs;
+}
+
 static inline bool kvm_available_flush_tlb_with_range(void)
 {
@@ -4654,14 +4691,14 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 }
 
 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
-                                    unsigned long cr0, unsigned long cr4,
-                                    u64 efer, union kvm_mmu_role new_role)
+                                    struct kvm_mmu_role_regs *regs,
+                                    union kvm_mmu_role new_role)
 {
-        if (!(cr0 & X86_CR0_PG))
+        if (!____is_cr0_pg(regs))
                 nonpaging_init_context(vcpu, context);
-        else if (efer & EFER_LMA)
+        else if (____is_efer_lma(regs))
                 paging64_init_context(vcpu, context);
-        else if (cr4 & X86_CR4_PAE)
+        else if (____is_cr4_pae(regs))
                 paging32E_init_context(vcpu, context);
         else
                 paging32_init_context(vcpu, context);
@@ -4672,15 +4709,15 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
         reset_shadow_zero_bits_mask(vcpu, context);
 }
 
-static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
-                                unsigned long cr4, u64 efer)
+static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
+                                struct kvm_mmu_role_regs *regs)
 {
         struct kvm_mmu *context = &vcpu->arch.root_mmu;
         union kvm_mmu_role new_role =
                 kvm_calc_shadow_mmu_root_page_role(vcpu, false);
 
         if (new_role.as_u64 != context->mmu_role.as_u64)
-                shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+                shadow_mmu_init_context(vcpu, context, regs, new_role);
 }
 
 static union kvm_mmu_role
@@ -4699,12 +4736,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
                              unsigned long cr4, u64 efer, gpa_t nested_cr3)
 {
         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
+        struct kvm_mmu_role_regs regs = {
+                .cr0 = cr0,
+                .cr4 = cr4,
+                .efer = efer,
+        };
         union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
 
         __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
 
         if (new_role.as_u64 != context->mmu_role.as_u64)
-                shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+                shadow_mmu_init_context(vcpu, context, &regs, new_role);
 
         /*
          * Redo the shadow bits, the reset done by shadow_mmu_init_context()
@@ -4773,11 +4815,9 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
         struct kvm_mmu *context = &vcpu->arch.root_mmu;
+        struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 
-        kvm_init_shadow_mmu(vcpu,
-                            kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
-                            kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
-                            vcpu->arch.efer);
+        kvm_init_shadow_mmu(vcpu, &regs);
 
         context->get_guest_pgd = get_cr3;
         context->get_pdptr = kvm_pdptr_read;
