@@ -3547,11 +3547,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 	if (!VALID_PAGE(*root_hpa))
 		return;
 
-	/*
-	 * The "root" may be a special root, e.g. a PAE entry, treat it as a
-	 * SPTE to ensure any non-PA bits are dropped.
-	 */
-	sp = spte_to_child_sp(*root_hpa);
+	sp = root_to_sp(*root_hpa);
 	if (WARN_ON_ONCE(!sp))
 		return;
 
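The new root_to_sp() helper is introduced outside this file, so its definition does not appear in these hunks. Judging from the comment and the spte_to_child_sp() call it absorbs above, it is presumably a thin wrapper along the following lines; treat this as a sketch, not the commit's verbatim definition:

	/* Assumed shape of the new helper, reconstructed from the hunk above. */
	static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
	{
		/*
		 * The "root" may be a special root, e.g. a PAE entry; treat it
		 * as a SPTE to ensure any non-PA bits are dropped.
		 */
		return spte_to_child_sp(root);
	}

Hiding the mask-and-convert detail behind one helper gives every root-HPA caller below a single, self-describing conversion point.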
@@ -3597,7 +3593,7 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
 					   &invalid_list);
 
 	if (free_active_root) {
-		if (to_shadow_page(mmu->root.hpa)) {
+		if (root_to_sp(mmu->root.hpa)) {
 			mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
 		} else if (mmu->pae_root) {
 			for (i = 0; i < 4; ++i) {
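For readers tracking the old calls: to_shadow_page() and spte_to_child_sp() are pre-existing helpers from KVM's MMU-internal headers, not part of this diff. From memory they look roughly like the sketch below (an assumption, not a quote of this tree); the SPTE_BASE_ADDR_MASK step is what the deleted comment in the first hunk means by dropping non-PA bits:

	/* Assumed shape of the pre-existing helpers. */
	static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
	{
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		/* The kvm_mmu_page is stashed in the page's private data. */
		return (struct kvm_mmu_page *)page_private(page);
	}

	static inline struct kvm_mmu_page *spte_to_child_sp(u64 spte)
	{
		return to_shadow_page(spte & SPTE_BASE_ADDR_MASK);
	}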
@@ -3621,6 +3617,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 {
 	unsigned long roots_to_free = 0;
+	struct kvm_mmu_page *sp;
 	hpa_t root_hpa;
 	int i;
 
@@ -3635,8 +3632,8 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 		if (!VALID_PAGE(root_hpa))
 			continue;
 
-		if (!to_shadow_page(root_hpa) ||
-		    to_shadow_page(root_hpa)->role.guest_mode)
+		sp = root_to_sp(root_hpa);
+		if (!sp || sp->role.guest_mode)
 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
 	}
 
@@ -3991,7 +3988,7 @@ static bool is_unsync_root(hpa_t root)
 	 * requirement isn't satisfied.
 	 */
 	smp_rmb();
-	sp = to_shadow_page(root);
+	sp = root_to_sp(root);
 
 	/*
 	 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
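For context on that remark: the PAE root is a plain per-vCPU page allocated when the MMU is created, not a page obtained from the shadow-page caches, so there is no struct kvm_mmu_page behind it and root_to_sp() returns NULL for it. A hedged reconstruction of that allocation (based on __kvm_mmu_create(), written from memory; flags are an assumption):

	/*
	 * The PDPT must sit below 4GiB because CR3 is only 32 bits when
	 * emulating 32-bit PAE paging, hence __GFP_DMA32.
	 */
	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
	if (!page)
		return -ENOMEM;
	mmu->pae_root = page_address(page);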
@@ -4021,11 +4018,12 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu->root.hpa;
-		sp = to_shadow_page(root);
 
 		if (!is_unsync_root(root))
 			return;
 
+		sp = root_to_sp(root);
+
 		write_lock(&vcpu->kvm->mmu_lock);
 		mmu_sync_children(vcpu, sp, true);
 		write_unlock(&vcpu->kvm->mmu_lock);
@@ -4355,7 +4353,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 				struct kvm_page_fault *fault)
 {
-	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
+	struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
 
 	/* Special roots, e.g. pae_root, are not backed by shadow pages. */
 	if (sp && is_obsolete_sp(vcpu->kvm, sp))
@@ -4535,7 +4533,7 @@ static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
 {
 	return (role.direct || pgd == root->pgd) &&
 	       VALID_PAGE(root->hpa) &&
-	       role.word == to_shadow_page(root->hpa)->role.word;
+	       role.word == root_to_sp(root->hpa)->role.word;
 }
 
 /*
@@ -4609,7 +4607,7 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
 	 * having to deal with PDPTEs.  We may add support for 32-bit hosts/VMs
 	 * later if necessary.
 	 */
-	if (VALID_PAGE(mmu->root.hpa) && !to_shadow_page(mmu->root.hpa))
+	if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
 		kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
 
 	if (VALID_PAGE(mmu->root.hpa))
@@ -4657,7 +4655,7 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
 	 */
 	if (!new_role.direct)
 		__clear_sp_write_flooding_count(
-				to_shadow_page(vcpu->arch.mmu->root.hpa));
+				root_to_sp(vcpu->arch.mmu->root.hpa));
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
@@ -5526,7 +5524,7 @@ static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
 	 * (c) KVM doesn't track previous roots for PAE paging, and the guest
 	 * is unlikely to zap an in-use PGD.
 	 */
-	sp = to_shadow_page(root_hpa);
+	sp = root_to_sp(root_hpa);
 	return !sp || is_obsolete_sp(kvm, sp);
 }
 