Commit 732f576

KVM: x86/mmu: Add helper to convert root hpa to shadow page
Add a dedicated helper for converting a root hpa to a shadow page in
anticipation of using a "dummy" root to handle the scenario where KVM
needs to load a valid shadow root (from hardware's perspective), but
the guest doesn't have a visible root to shadow.  Similar to PAE roots,
the dummy root won't have an associated kvm_mmu_page and will need
special handling when finding a shadow page given a root.

Opportunistically retrieve the root shadow page in kvm_mmu_sync_roots()
*after* verifying the root is unsync (the dummy root can never be
unsync).

Link: https://lore.kernel.org/r/20230729005200.1057358-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 2d28b12 commit 732f576
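
In short, the open-coded conversion in mmu_free_root_page() (together with its comment about special roots) becomes a named helper in spte.h, and every site that previously called to_shadow_page() or spte_to_child_sp() on a root hpa now goes through it. A condensed sketch of the new helper, lifted from the hunks below (not standalone-buildable; it assumes the surrounding KVM MMU definitions such as hpa_t, struct kvm_mmu_page and spte_to_child_sp()):

	/* New helper in arch/x86/kvm/mmu/spte.h. */
	static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
	{
		/*
		 * The "root" may be a special root, e.g. a PAE entry, treat it
		 * as a SPTE to ensure any non-PA bits are dropped.
		 */
		return spte_to_child_sp(root);
	}

As with to_shadow_page(), the result is NULL for roots that have no associated kvm_mmu_page, which is what lets callers keep a single check that covers PAE roots today and the dummy root in the follow-up patches.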

3 files changed: +23 −16 lines changed


arch/x86/kvm/mmu/mmu.c

Lines changed: 13 additions & 15 deletions
@@ -3547,11 +3547,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 	if (!VALID_PAGE(*root_hpa))
 		return;
 
-	/*
-	 * The "root" may be a special root, e.g. a PAE entry, treat it as a
-	 * SPTE to ensure any non-PA bits are dropped.
-	 */
-	sp = spte_to_child_sp(*root_hpa);
+	sp = root_to_sp(*root_hpa);
 	if (WARN_ON_ONCE(!sp))
 		return;
 
@@ -3597,7 +3593,7 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
 					   &invalid_list);
 
 	if (free_active_root) {
-		if (to_shadow_page(mmu->root.hpa)) {
+		if (root_to_sp(mmu->root.hpa)) {
 			mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
 		} else if (mmu->pae_root) {
 			for (i = 0; i < 4; ++i) {
@@ -3621,6 +3617,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 {
 	unsigned long roots_to_free = 0;
+	struct kvm_mmu_page *sp;
 	hpa_t root_hpa;
 	int i;
 
@@ -3635,8 +3632,8 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 		if (!VALID_PAGE(root_hpa))
 			continue;
 
-		if (!to_shadow_page(root_hpa) ||
-		    to_shadow_page(root_hpa)->role.guest_mode)
+		sp = root_to_sp(root_hpa);
+		if (!sp || sp->role.guest_mode)
 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
 	}
 
@@ -3991,7 +3988,7 @@ static bool is_unsync_root(hpa_t root)
 	 * requirement isn't satisfied.
 	 */
 	smp_rmb();
-	sp = to_shadow_page(root);
+	sp = root_to_sp(root);
 
 	/*
 	 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
@@ -4021,11 +4018,12 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu->root.hpa;
-		sp = to_shadow_page(root);
 
 		if (!is_unsync_root(root))
 			return;
 
+		sp = root_to_sp(root);
+
 		write_lock(&vcpu->kvm->mmu_lock);
 		mmu_sync_children(vcpu, sp, true);
 		write_unlock(&vcpu->kvm->mmu_lock);
@@ -4355,7 +4353,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 				struct kvm_page_fault *fault)
 {
-	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
+	struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
 
 	/* Special roots, e.g. pae_root, are not backed by shadow pages. */
 	if (sp && is_obsolete_sp(vcpu->kvm, sp))
@@ -4535,7 +4533,7 @@ static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
 {
 	return (role.direct || pgd == root->pgd) &&
 	       VALID_PAGE(root->hpa) &&
-	       role.word == to_shadow_page(root->hpa)->role.word;
+	       role.word == root_to_sp(root->hpa)->role.word;
 }
 
 /*
@@ -4609,7 +4607,7 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
 	 * having to deal with PDPTEs.  We may add support for 32-bit hosts/VMs
 	 * later if necessary.
 	 */
-	if (VALID_PAGE(mmu->root.hpa) && !to_shadow_page(mmu->root.hpa))
+	if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
 		kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
 
 	if (VALID_PAGE(mmu->root.hpa))
@@ -4657,7 +4655,7 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
 	 */
 	if (!new_role.direct)
 		__clear_sp_write_flooding_count(
-				to_shadow_page(vcpu->arch.mmu->root.hpa));
+				root_to_sp(vcpu->arch.mmu->root.hpa));
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
@@ -5526,7 +5524,7 @@ static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
 	 * (c) KVM doesn't track previous roots for PAE paging, and the guest
 	 *     is unlikely to zap an in-use PGD.
 	 */
-	sp = to_shadow_page(root_hpa);
+	sp = root_to_sp(root_hpa);
 	return !sp || is_obsolete_sp(kvm, sp);
 }
 

arch/x86/kvm/mmu/spte.h

Lines changed: 9 additions & 0 deletions
@@ -236,6 +236,15 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
 	return to_shadow_page(__pa(sptep));
 }
 
+static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
+{
+	/*
+	 * The "root" may be a special root, e.g. a PAE entry, treat it as a
+	 * SPTE to ensure any non-PA bits are dropped.
+	 */
+	return spte_to_child_sp(root);
+}
+
 static inline bool is_mmio_spte(u64 spte)
 {
 	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
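
The helper keeps the NULL return for roots that are not backed by a shadow page, so callers that can encounter special roots must still check the result before dereferencing it. A minimal caller sketch, mirroring the pattern used in mmu_free_root_page() and kvm_mmu_free_guest_mode_roots() above (the enclosing function and kernel types are assumed, not shown):

	struct kvm_mmu_page *sp;

	sp = root_to_sp(root_hpa);
	if (!sp)
		return;	/* special root, e.g. a PAE entry: no shadow page */

	/* ... operate on the root's shadow page ... */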

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 1 addition & 1 deletion
@@ -689,7 +689,7 @@ static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 		else
 
 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)	\
-	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
+	for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
 
 /*
  * Yield if the MMU lock is contended or this thread needs to return control
