Skip to content

Commit a102a67

Browse files
Sean Christopherson authored and bonzini committed
KVM: x86/mmu: Don't drop level/direct from MMU role calculation
Use the calculated role as-is when propagating it to kvm_mmu.mmu_role, i.e. stop masking off meaningful fields. The concept of masking off fields came from kvm_mmu_pte_write(), which (correctly) ignores certain fields when comparing kvm_mmu_page.role against kvm_mmu.mmu_role, e.g. the current mmu's access and level have no relation to a shadow page's access and level. Masking off the level causes problems for 5-level paging, e.g. CR4.LA57 has its own redundant flag in the extended role, and nested EPT would need a similar hack to support 5-level paging for L2. Opportunistically rework the mask for kvm_mmu_pte_write() to define the fields that should be ignored as opposed to the fields that should be checked, i.e. make it opt-out instead of opt-in so that new fields are automatically picked up. While doing so, stop ignoring "direct". The field is effectively ignored anyways because kvm_mmu_pte_write() is only reached with an indirect mmu and the loop only walks indirect shadow pages, but double checking "direct" literally costs nothing. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent e743664 commit a102a67

File tree

1 file changed

+18
-17
lines changed

1 file changed

+18
-17
lines changed

arch/x86/kvm/mmu/mmu.c

Lines changed: 18 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -215,17 +215,6 @@ struct kvm_shadow_walk_iterator {
215215
unsigned index;
216216
};
217217

218-
static const union kvm_mmu_page_role mmu_base_role_mask = {
219-
.cr0_wp = 1,
220-
.gpte_is_8_bytes = 1,
221-
.nxe = 1,
222-
.smep_andnot_wp = 1,
223-
.smap_andnot_wp = 1,
224-
.smm = 1,
225-
.guest_mode = 1,
226-
.ad_disabled = 1,
227-
};
228-
229218
#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker) \
230219
for (shadow_walk_init_using_root(&(_walker), (_vcpu), \
231220
(_root), (_addr)); \
@@ -4930,7 +4919,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
49304919
union kvm_mmu_role new_role =
49314920
kvm_calc_tdp_mmu_root_page_role(vcpu, false);
49324921

4933-
new_role.base.word &= mmu_base_role_mask.word;
49344922
if (new_role.as_u64 == context->mmu_role.as_u64)
49354923
return;
49364924

@@ -5002,7 +4990,6 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
50024990
union kvm_mmu_role new_role =
50034991
kvm_calc_shadow_mmu_root_page_role(vcpu, false);
50044992

5005-
new_role.base.word &= mmu_base_role_mask.word;
50064993
if (new_role.as_u64 == context->mmu_role.as_u64)
50074994
return;
50084995

@@ -5059,7 +5046,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
50595046

50605047
__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
50615048

5062-
new_role.base.word &= mmu_base_role_mask.word;
50635049
if (new_role.as_u64 == context->mmu_role.as_u64)
50645050
return;
50655051

@@ -5100,7 +5086,6 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
51005086
union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
51015087
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
51025088

5103-
new_role.base.word &= mmu_base_role_mask.word;
51045089
if (new_role.as_u64 == g_context->mmu_role.as_u64)
51055090
return;
51065091

@@ -5339,6 +5324,22 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
53395324
return spte;
53405325
}
53415326

5327+
/*
5328+
* Ignore various flags when determining if a SPTE can be immediately
5329+
* overwritten for the current MMU.
5330+
* - level: explicitly checked in mmu_pte_write_new_pte(), and will never
5331+
* match the current MMU role, as MMU's level tracks the root level.
5332+
* - access: updated based on the new guest PTE
5333+
* - quadrant: handled by get_written_sptes()
5334+
* - invalid: always false (loop only walks valid shadow pages)
5335+
*/
5336+
static const union kvm_mmu_page_role role_ign = {
5337+
.level = 0xf,
5338+
.access = 0x7,
5339+
.quadrant = 0x3,
5340+
.invalid = 0x1,
5341+
};
5342+
53425343
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
53435344
const u8 *new, int bytes,
53445345
struct kvm_page_track_notifier_node *node)
@@ -5394,8 +5395,8 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
53945395
entry = *spte;
53955396
mmu_page_zap_pte(vcpu->kvm, sp, spte);
53965397
if (gentry &&
5397-
!((sp->role.word ^ base_role)
5398-
& mmu_base_role_mask.word) && rmap_can_add(vcpu))
5398+
!((sp->role.word ^ base_role) & ~role_ign.word) &&
5399+
rmap_can_add(vcpu))
53995400
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
54005401
if (need_remote_flush(entry, *spte))
54015402
remote_flush = true;

0 commit comments

Comments
 (0)